diff --git a/eng/versioning/version_client.txt b/eng/versioning/version_client.txt index 78057004bcff..2ea3c152407e 100644 --- a/eng/versioning/version_client.txt +++ b/eng/versioning/version_client.txt @@ -40,6 +40,7 @@ com.azure:azure-ai-agents-persistent;1.0.0-beta.2;1.0.0-beta.3 com.azure:azure-ai-agents;1.0.0-beta.1;1.0.0-beta.2 com.azure:azure-ai-anomalydetector;3.0.0-beta.5;3.0.0-beta.6 com.azure:azure-ai-contentsafety;1.0.17;1.1.0-beta.1 +com.azure:azure-ai-contentunderstanding;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-ai-documentintelligence;1.0.7;1.1.0-beta.1 com.azure:azure-ai-documenttranslator;1.0.0-beta.1;1.0.0-beta.2 com.azure:azure-ai-formrecognizer;4.1.13;4.2.0-beta.1 diff --git a/pom.xml b/pom.xml index 6c9d431da122..c2b0abc325c2 100644 --- a/pom.xml +++ b/pom.xml @@ -66,6 +66,7 @@ sdk/containerservicefleet sdk/containerservicesafeguards sdk/contentsafety + sdk/contentunderstanding sdk/core sdk/core-v2 sdk/cosmos diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/SKILL.md new file mode 100644 index 000000000000..a606da9255bd --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/SKILL.md @@ -0,0 +1,210 @@ +--- +name: java-cu-create-async-sample +description: | + Creates or updates async samples for Content Understanding SDK with reactive patterns. 
+ + This skill helps you: + - Convert sync samples to async versions + - Apply reactive patterns (Mono/Flux, flatMap, doOnNext, subscribe) + - Ensure 100% functionality parity between sync and async + - Report any non-portable code + + Trigger phrases: "create async sample", "convert to async", "sync to async", "generate async version" +--- + +# Java CU Create Async Sample + +This skill creates or updates async samples for the Content Understanding SDK, ensuring they use proper reactive patterns and maintain 100% functionality parity with their sync counterparts. + +## Workflow + +### Step 1: Enumerate Sync Samples + +1. List all sync samples in `src/samples/java/com/azure/ai/contentunderstanding/samples/` +2. Filter for files matching pattern: `Sample*.java` (excluding `*Async.java`) +3. For each sync sample, identify the corresponding async sample (if exists): + - Sync: `SampleXX_Name.java` + - Async: `SampleXX_NameAsync.java` + +### Step 2: Read Reference Documentation + +Before converting, read the async-patterns.md reference document in the `references/` directory for: + +- Reactive programming concepts (Mono, Flux, flatMap, doOnNext, subscribe) +- Conversion patterns and examples +- Common pitfalls to avoid + +### Step 3: Convert Each Sample + +For each sync sample: + +1. **Read the sync sample** to understand its functionality +2. **Check if async version exists**: + - If exists: Read and compare with sync version + - If missing: Create new async version +3. **Identify conversion points**: + - Client: `ContentUnderstandingClient` → `ContentUnderstandingAsyncClient` + - Methods: Direct calls → Reactive chains + - Return types: Direct values → `Mono` or `Flux` + - PollerFlux: Use reactive pattern (`.last().flatMap().subscribe()`) +4. 
**Apply reactive patterns** (see the async-patterns.md reference document):
+   - Use `flatMap()` for sequential async operations
+   - Use `doOnNext()` for side effects (printing)
+   - Use `subscribe()` to start execution
+   - Add `TimeUnit.SECONDS.sleep()` to prevent premature exit
+5. **Verify functionality parity**:
+   - Same operations in same order
+   - Same output messages
+   - Same error handling
+   - Same helper methods
+
+### Step 4: Report Issues
+
+If something cannot be ported:
+
+1. **Document the issue** clearly
+2. **Explain why** it cannot be ported
+3. **Ask the user** for guidance if needed
+
+## Conversion Patterns
+
+### Pattern 1: Simple Operations (Mono)
+
+**Sync:**
+
+```java
+ContentUnderstandingDefaults defaults = client.getDefaults();
+System.out.println("Defaults: " + defaults);
+```
+
+**Async:**
+
+```java
+client.getDefaults()
+    .doOnNext(defaults -> System.out.println("Defaults: " + defaults))
+    .subscribe();
+```
+
+### Pattern 2: Sequential Operations
+
+**Sync:**
+
+```java
+ContentUnderstandingDefaults current = client.getDefaults();
+ContentUnderstandingDefaults updated = client.updateDefaults(map);
+ContentUnderstandingDefaults verified = client.getDefaults();
+```
+
+**Async:**
+
+```java
+client.getDefaults()
+    .flatMap(current -> client.updateDefaults(map))
+    .flatMap(updated -> client.getDefaults())
+    .doOnNext(verified -> System.out.println("Verified: " + verified))
+    .subscribe();
+```
+
+### Pattern 3: PollerFlux Operations
+
+**Sync:**
+
+```java
+SyncPoller poller = client.beginAnalyze(...);
+Result result = poller.getFinalResult();
+```
+
+**Async:**
+
+```java
+PollerFlux poller = client.beginAnalyze(...);
+poller.last()
+    .flatMap(pollResponse -> {
+        if (pollResponse.getStatus().isComplete()) {
+            return pollResponse.getFinalResult();
+        } else {
+            return Mono.error(new RuntimeException("Operation failed"));
+        }
+    })
+    .subscribe(result -> {
+        // Process result
+    });
+```
+
+### Pattern 4: Error Handling
+
+**Sync:**
+
+```java
+try { 
+    ContentUnderstandingDefaults defaults = client.getDefaults();
+} catch (Exception e) {
+    System.err.println("Error: " + e.getMessage());
+}
+```
+
+**Async:**
+
+```java
+client.getDefaults()
+    .doOnError(error -> System.err.println("Error: " + error.getMessage()))
+    .subscribe(
+        result -> { /* success */ },
+        error -> System.exit(1)
+    );
+```
+
+## Naming Conventions
+
+- Sync sample: `SampleXX_Name.java`
+- Async sample: `SampleXX_NameAsync.java`
+- Package: `com.azure.ai.contentunderstanding.samples`
+- Class name matches file name
+
+## Required Imports for Async Samples
+
+```java
+import reactor.core.publisher.Mono;
+import java.util.concurrent.TimeUnit;
+```
+
+## Validation Checklist
+
+Before finalizing an async sample, verify:
+
+- [ ] Uses `ContentUnderstandingAsyncClient` (not sync client)
+- [ ] No `.block()` calls (except in retry loops if necessary)
+- [ ] Uses reactive chaining (`flatMap`, `then`) for sequential operations
+- [ ] Uses `doOnNext()` for side effects (printing)
+- [ ] Uses `subscribe()` to start execution
+- [ ] Includes `TimeUnit.SECONDS.sleep()` to prevent premature exit
+- [ ] Same functionality as sync version (100% parity)
+- [ ] Same output messages and formatting
+- [ ] Same helper methods (if any)
+- [ ] Same error handling behavior
+- [ ] Same comments and documentation
+
+## Non-Portable Patterns
+
+Report these as issues:
+
+1. **Blocking operations in loops**: May need special handling
+2. **Synchronous file I/O**: May need to wrap in `Mono.fromCallable()`
+3. **Thread.sleep()**: Should use `TimeUnit.SECONDS.sleep()` in reactive context
+4. **Complex state management**: May need refactoring for reactive patterns
+
+## Questions to Ask
+
+If encountering non-portable code, ask:
+
+1. "How should we handle [specific blocking operation] in the async version?"
+2. "Should [specific pattern] be refactored for reactive programming?"
+3. "Is [specific functionality] required to be synchronous, or can it be async?" 
+ +## Output + +After processing all samples, provide: + +1. **Summary**: Total sync samples, async samples created/updated +2. **Issues**: List of any non-portable code with explanations +3. **Questions**: Any questions that need user input diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/references/async-patterns.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/references/async-patterns.md new file mode 100644 index 000000000000..dfebdc506223 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/java-cu-create-async-sample/references/async-patterns.md @@ -0,0 +1,509 @@ +# Async Programming Reference for Content Understanding SDK + +This document provides reactive programming concepts and patterns for creating async samples in the Content Understanding SDK. + +## Introduction to Reactive Programming in Java + +### What is Reactive Programming? + +Reactive programming is a programming paradigm that focuses on asynchronous data streams and the propagation of change. In Java, reactive programming is implemented using **Project Reactor**, which provides `Mono` and `Flux` types for handling asynchronous operations. + +Unlike traditional synchronous programming where operations block threads waiting for results, reactive programming allows operations to be non-blocking and asynchronous. This means your code can start multiple operations and handle their results as they complete, rather than waiting for each one sequentially. + +### Why is Reactive Programming Required? + +1. **Non-Blocking Operations**: Reactive programming allows your application to handle multiple operations concurrently without blocking threads. This is especially important for I/O-bound operations like API calls, database queries, or file operations. + +2. 
**Better Resource Utilization**: Instead of blocking threads waiting for responses, reactive programming allows threads to be freed up to handle other tasks. This leads to better scalability and resource efficiency.
+
+3. **Composability**: Reactive streams can be easily composed, transformed, and chained together, making complex asynchronous workflows more readable and maintainable.
+
+4. **Backpressure Handling**: Reactive streams can handle backpressure (when a producer is faster than a consumer) automatically, preventing memory issues.
+
+5. **Azure SDK Standard**: Azure SDK for Java uses reactive programming for all async operations, providing a consistent pattern across all services.
+
+### How is Reactive Programming Commonly Used?
+
+In Azure SDK for Java and Content Understanding SDK specifically:
+
+- **API Calls**: All async client methods return `Mono` (for single results) or `Flux` (for collections/streams)
+- **Long-Running Operations**: Operations like document analysis use `PollerFlux` which emits status updates over time
+- **Chaining Operations**: Sequential operations are chained using `flatMap()` to ensure proper ordering
+- **Error Handling**: Errors are propagated through the reactive stream and handled with `doOnError()` or error callbacks
+- **Side Effects**: Operations like printing or logging are done with `doOnNext()` without blocking the stream
+
+### For .NET Developers: Java Reactive vs .NET async/await
+
+If you're coming from a .NET background, here's how Java reactive programming compares to .NET's async/await pattern:
+
+**Similarities:**
+
+- Both are designed for non-blocking, asynchronous operations
+- Both handle I/O-bound operations efficiently
+- Both allow composing multiple async operations
+
+**Key Differences:**
+
+| .NET async/await | Java Reactive (Project Reactor) |
+|------------------|----------------------------------|
+| `Task` / `Task<T>` | `Mono<Void>` / `Mono<T>` |
+| `IAsyncEnumerable<T>` | `Flux<T>` |
+| `await` keyword | 
`.block()` (avoid in async samples) or `.subscribe()` | +| Sequential: `var result = await operation()` | Sequential: `.flatMap(result -> nextOperation())` | +| `async` method modifier | No modifier needed - methods return `Mono`/`Flux` | +| `try/catch` for errors | `.doOnError()` or error callback in `.subscribe()` | +| `await Task.WhenAll()` | `Flux.merge()` or `Mono.zip()` | + +**Example Comparison:** + +**.NET (async/await):** + +```csharp +var current = await client.GetDefaultsAsync(); +var updated = await client.UpdateDefaultsAsync(map); +var verified = await client.GetDefaultsAsync(); +Console.WriteLine($"Verified: {verified}"); +``` + +**Java (Reactive):** + +```java +client.getDefaults() + .flatMap(current -> client.updateDefaults(map)) + .flatMap(updated -> client.getDefaults()) + .doOnNext(verified -> System.out.println("Verified: " + verified)) + .subscribe(); +``` + +**Key Takeaway:** In .NET, you use `await` to get values from async operations. In Java reactive, you chain operations with `flatMap()` and handle values in callbacks (`doOnNext()`, `subscribe()`). The Java approach is more functional and composable, but requires thinking in terms of streams and transformations rather than sequential await statements. + +### Key Principles + +1. **Lazy Execution**: Reactive streams don't execute until you call `subscribe()` - this allows you to build up complex chains before execution +2. **Immutable**: Each operator returns a new stream, keeping the original unchanged +3. **Non-Blocking**: Operations never block threads - they return immediately and process results asynchronously +4. 
**Composable**: Streams can be combined, filtered, transformed, and chained together
+
+### Example: Sync vs Reactive
+
+**Synchronous (Blocking):**
+
+```java
+// Each call blocks the thread until complete
+ContentUnderstandingDefaults current = client.getDefaults(); // Blocks here
+ContentUnderstandingDefaults updated = client.updateDefaults(map); // Blocks here
+System.out.println("Done");
+```
+
+**Reactive (Non-Blocking):**
+
+```java
+// Operations are chained and execute asynchronously
+client.getDefaults()
+    .flatMap(current -> client.updateDefaults(map))
+    .doOnNext(updated -> System.out.println("Done"))
+    .subscribe(); // Starts execution, doesn't block
+```
+
+The reactive version allows the thread to handle other work while waiting for API responses, making your application more efficient and scalable.
+
+## Core Concepts
+
+### Mono vs Flux
+
+**Mono**: Represents 0 or 1 value
+
+- Use for: Single API calls, get operations, update operations
+- Example: `Mono<ContentUnderstandingDefaults> getDefaults()`
+
+**Flux**: Represents 0 to N values
+
+- Use for: Collections, streams, PollerFlux
+- Example: `Flux<ContentAnalyzer> listAnalyzers()`
+
+### Key Operators
+
+#### subscribe() - Start Execution
+
+**Purpose**: Subscribes to a Mono/Flux and executes callbacks.
+
+**Example:**
+
+```java
+client.getDefaults()
+    .subscribe(
+        result -> System.out.println("Got: " + result), // onNext
+        error -> System.err.println("Error: " + error), // onError
+        () -> System.out.println("Done!") // onComplete
+    );
+```
+
+**Important**: Without `subscribe()`, nothing happens - reactive chains are lazy.
+
+#### doOnNext() - Side Effects
+
+**Purpose**: Perform side effects (like printing) without changing the value. 
+ +**Example:** + +```java +client.getDefaults() + .doOnNext(defaults -> System.out.println("Current: " + defaults)) + .map(defaults -> defaults.getModelDeployments()) // Value passes through unchanged + .doOnNext(deployments -> System.out.println("Deployments: " + deployments)) + .subscribe(); +``` + +**Key point**: `doOnNext()` doesn't change the value - it just "peeks" at it. + +#### flatMap() - Chain Sequential Async Operations + +**Purpose**: Chain async operations where each returns a Mono/Flux. + +**Example:** + +```java +// Sequential operations +client.getDefaults() + .flatMap(current -> { + Map updates = buildUpdates(current); + return client.updateDefaults(updates); // Returns Mono + }) + .flatMap(updated -> { + return client.getDefaults(); // Returns Mono + }) + .subscribe(); +``` + +**When to use**: + +- Use `flatMap()` when the operation returns `Mono`/`Flux` (async) +- Use `map()` when the operation is synchronous (e.g., `toUpperCase()`) + +#### then() - Chain Without Using Previous Value + +**Purpose**: Chain operations when you don't need the previous value. 
+ +**Example:** + +```java +client.updateDefaults(map) + .then(client.getDefaults()) // Don't need updated value, just chain + .subscribe(defaults -> System.out.println("Final: " + defaults)); +``` + +## Conversion Patterns + +### Pattern 1: Simple Get Operation + +**Sync:** + +```java +ContentUnderstandingDefaults defaults = client.getDefaults(); +System.out.println("Defaults: " + defaults); +``` + +**Async:** + +```java +client.getDefaults() + .doOnNext(defaults -> System.out.println("Defaults: " + defaults)) + .subscribe(); + +// Prevent premature exit +try { + TimeUnit.SECONDS.sleep(5); +} catch (InterruptedException e) { + Thread.currentThread().interrupt(); +} +``` + +### Pattern 2: Sequential Operations + +**Sync:** + +```java +ContentUnderstandingDefaults current = client.getDefaults(); +System.out.println("Current: " + current); + +Map updates = buildUpdates(); +ContentUnderstandingDefaults updated = client.updateDefaults(updates); +System.out.println("Updated: " + updated); + +ContentUnderstandingDefaults verified = client.getDefaults(); +System.out.println("Verified: " + verified); +``` + +**Async:** + +```java +client.getDefaults() + .doOnNext(current -> { + System.out.println("Current: " + current); + }) + .flatMap(current -> { + Map updates = buildUpdates(); + return client.updateDefaults(updates); + }) + .doOnNext(updated -> { + System.out.println("Updated: " + updated); + }) + .flatMap(updated -> { + return client.getDefaults(); + }) + .doOnNext(verified -> { + System.out.println("Verified: " + verified); + }) + .doOnError(error -> { + System.err.println("Error: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> System.out.println("Completed successfully"), + error -> System.exit(1) + ); + +try { + TimeUnit.SECONDS.sleep(10); +} catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); +} +``` + +### Pattern 3: PollerFlux (Long-Running Operations) + +**Sync:** + +```java +SyncPoller 
operation + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + +AnalyzeResult result = operation.getFinalResult(); +System.out.println("Analysis completed"); +``` + +**Async:** + +```java +PollerFlux operation + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + +operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + System.out.println("Analysis completed"); + }) + .doOnError(error -> { + System.err.println("Error: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { /* Success */ }, + error -> System.exit(1) + ); + +try { + TimeUnit.MINUTES.sleep(1); +} catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); +} +``` + +### Pattern 4: Error Handling + +**Sync:** + +```java +try { + ContentUnderstandingDefaults defaults = client.getDefaults(); + System.out.println("Success: " + defaults); +} catch (Exception e) { + System.err.println("Error: " + e.getMessage()); + e.printStackTrace(); +} +``` + +**Async:** + +```java +client.getDefaults() + .doOnNext(defaults -> { + System.out.println("Success: " + defaults); + }) + .doOnError(error -> { + System.err.println("Error: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { /* Success */ }, + error -> System.exit(1) + ); +``` + +### Pattern 5: Conditional Operations + +**Sync:** + +```java +ContentAnalyzer analyzer = client.getAnalyzer(analyzerId); +if (analyzer != null) { + System.out.println("Found: " + analyzer.getAnalyzerId()); +} else { + System.out.println("Not found"); +} +``` + +**Async:** + +```java +client.getAnalyzer(analyzerId) + .doOnNext(analyzer -> { + System.out.println("Found: 
" + analyzer.getAnalyzerId()); + }) + .switchIfEmpty(Mono.fromRunnable(() -> { + System.out.println("Not found"); + })) + .subscribe(); +``` + +## Common Mistakes to Avoid + +### ❌ Using .block() in Async Samples + +**Wrong:** + +```java +ContentUnderstandingDefaults defaults = client.getDefaults().block(); +``` + +**Correct:** + +```java +client.getDefaults() + .subscribe(defaults -> { /* use defaults */ }); +``` + +### ❌ Not Chaining Sequential Operations + +**Wrong:** + +```java +client.getDefaults().subscribe(current -> {}); +client.updateDefaults(map).subscribe(updated -> {}); // May execute before first completes +``` + +**Correct:** + +```java +client.getDefaults() + .flatMap(current -> client.updateDefaults(map)) + .subscribe(updated -> {}); +``` + +### ❌ Forgetting to Subscribe + +**Wrong:** + +```java +client.getDefaults() + .doOnNext(defaults -> System.out.println(defaults)); +// Nothing happens - chain is lazy! +``` + +**Correct:** + +```java +client.getDefaults() + .doOnNext(defaults -> System.out.println(defaults)) + .subscribe(); // Starts execution +``` + +### ❌ Not Preventing Premature Exit + +**Wrong:** + +```java +client.getDefaults() + .subscribe(defaults -> System.out.println(defaults)); +// Program exits before async operation completes +``` + +**Correct:** + +```java +client.getDefaults() + .subscribe(defaults -> System.out.println(defaults)); + +try { + TimeUnit.SECONDS.sleep(5); +} catch (InterruptedException e) { + Thread.currentThread().interrupt(); +} +``` + +## Real Examples from CU SDK + +### Example 1: UpdateDefaults (Simple Sequential) + +**Sync Pattern:** + +```java +ContentUnderstandingDefaults current = client.getDefaults(); +ContentUnderstandingDefaults updated = client.updateDefaults(map); +ContentUnderstandingDefaults verified = client.getDefaults(); +``` + +**Async Pattern:** + +```java +client.getDefaults() + .flatMap(current -> client.updateDefaults(map)) + .flatMap(updated -> client.getDefaults()) + .subscribe(verified -> 
{}); +``` + +### Example 2: Analyze Invoice (PollerFlux) + +**Sync Pattern:** + +```java +SyncPoller operation = client.beginAnalyze(...); +AnalyzeResult result = operation.getFinalResult(); +``` + +**Async Pattern:** + +```java +PollerFlux operation = client.beginAnalyze(...); +operation.last() + .flatMap(pollResponse -> pollResponse.getFinalResult()) + .subscribe(result -> {}); +``` + +## Required Imports + +```java +import reactor.core.publisher.Mono; +import reactor.core.publisher.Flux; +import java.util.concurrent.TimeUnit; +``` + +## Best Practices + +1. **Always use reactive chaining** for sequential operations +2. **Use `doOnNext()` for side effects** (printing, logging) +3. **Use `flatMap()` for async operations** that return Mono/Flux +4. **Always call `subscribe()`** to start execution +5. **Add sleep** to prevent premature program exit +6. **Handle errors** with `doOnError()` and error callback in `subscribe()` +7. **Match sync sample output** exactly for parity diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/review-cu-sample-quality/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/review-cu-sample-quality/SKILL.md new file mode 100644 index 000000000000..666fe6bc9a81 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/review-cu-sample-quality/SKILL.md @@ -0,0 +1,300 @@ +--- +name: review-cu-sample-quality +description: Reviews Content Understanding SDK samples for code quality, correctness, and output accuracy. Enumerates sync/async sample pairs, runs samples to generate output, reviews code line-by-line for correctness and clarity, verifies output matches code behavior, checks comments for accuracy, and ensures async samples match sync functionality. Use when reviewing sample code quality, verifying sample correctness, or ensuring sync/async parity. 
+--- + +# Review CU Sample Quality + +This skill performs comprehensive quality review of Content Understanding SDK samples, ensuring code correctness, clarity, output accuracy, and sync/async parity. + +## When to Use + +- Reviewing sample code quality and correctness +- Verifying sample outputs match code behavior +- Ensuring sync and async samples have functional parity +- Checking comment accuracy and clarity +- Validating sample documentation matches implementation + +## Prerequisites + +1. **Clean working directory**: The skill requires an empty git change list before starting +2. **Sample output files**: Either existing output files in `target/sample_result_out_txt/` or ability to run samples +3. **CU SDK compiled**: Samples require the SDK to be compiled (handled automatically) + +## Workflow + +### Step 1: Verify Clean Working Directory + +**CRITICAL**: The skill must stop if there are uncommitted changes. + +```bash +git status --porcelain +``` + +If output is non-empty, **STOP** and inform the user that changes must be committed or stashed first. + +### Step 2: Enumerate Samples + +Discover all samples in `src/samples/java/com/azure/ai/contentunderstanding/samples/` and group them by sync/async pairs. + +**Sample naming pattern:** +- Sync: `Sample##_Name.java` +- Async: `Sample##_NameAsync.java` + +**Grouping logic:** +- Extract base name by removing `Async` suffix +- Group samples with same base number and name +- Example: `Sample00_UpdateDefaults` + `Sample00_UpdateDefaultsAsync` = one group + +**Output format:** +``` +Found N sample groups: + - Sample00_UpdateDefaults [sync: Sample00_UpdateDefaults.java] [async: Sample00_UpdateDefaultsAsync.java] + - Sample01_AnalyzeBinary [sync: Sample01_AnalyzeBinary.java] [async: Sample01_AnalyzeBinaryAsync.java] + ... 
+``` + +### Step 3: Ensure Sample Outputs Exist + +For each sample group, check if output files exist in `target/sample_result_out_txt/`: +- Sync: `target/sample_result_out_txt/Sample##_Name.out.txt` +- Async: `target/sample_result_out_txt/Sample##_NameAsync.out.txt` + +**If outputs missing:** +- Use the `run-all-samples` skill to generate outputs +- The skill will skip samples that already have outputs + +**If outputs exist:** +- Proceed to review (no need to re-run) + +### Step 4: Review Sync Samples + +For each sync sample, perform line-by-line review: + +#### 4.1 Understand Sample Purpose +- Read the class-level JavaDoc comment +- Understand what the sample demonstrates +- Identify key operations and expected behavior + +#### 4.2 Review Code Correctness +Check for: +- **Logic errors**: Incorrect API usage, wrong method calls, missing error handling +- **Type safety**: Correct types, proper casting, null handling +- **Resource management**: Proper cleanup, try-with-resources where needed +- **Best practices**: Following Java and Azure SDK conventions + +#### 4.3 Review Code Clarity +Check for: +- **Readability**: Clear variable names, logical flow, appropriate abstractions +- **Structure**: Well-organized code, appropriate method extraction +- **Comments**: Code is self-documenting or has helpful comments + +#### 4.4 Verify Output Matches Code +Compare the sample code with its output file: + +1. **Trace execution flow**: Follow the code path and identify what should be printed +2. **Check for missing output**: Verify all expected print statements appear in output +3. **Check for incorrect output**: Verify output values match code logic +4. 
**Check for unexpected output**: Look for errors, exceptions, or warnings not explained in code + +**Common issues:** +- Missing `System.out.println()` output +- Incorrect variable values in output +- Exception messages not matching code +- Missing error handling output + +#### 4.5 Make Code Changes +After identifying issues: +- Fix correctness problems +- Improve clarity where needed +- Ensure output matches code behavior +- Add missing error handling or output statements + +#### 4.6 Review Comments +Check both class-level JavaDoc and inline comments: + +**Class-level JavaDoc:** +- Accurately describes sample purpose +- Lists all demonstrated features +- Prerequisites are correct and complete +- Examples match actual code + +**Inline comments:** +- Code region markers (BEGIN/END) are correct +- Comments explain non-obvious logic +- Comments match actual code behavior +- No outdated or incorrect comments + +**Make changes as needed** to ensure comments are accurate and helpful. + +#### 4.7 Generate Review Summary + +For each sync sample that had changes, create a markdown file: + +**File naming**: `SampleReview_YYYYMMDD_HHMMSS.md` (e.g., `SampleReview_20260126_143022.md`) + +**File location**: `target/sample_reviews/` (create directory if needed) + +**Summary format:** +```markdown +# Sample Review: Sample##_Name + +**Date**: YYYY-MM-DD HH:MM:SS +**Sample Type**: Sync +**File**: `src/samples/java/com/azure/ai/contentunderstanding/samples/Sample##_Name.java` + +## Sample Purpose +[Brief description of what the sample demonstrates] + +## Changes Made + +### Code Correctness +- [ ] Issue: [Description] + - Fix: [What was changed] + +### Code Clarity +- [ ] Issue: [Description] + - Fix: [What was changed] + +### Output Verification +- [ ] Issue: [Description] + - Fix: [What was changed] + +### Comments +- [ ] Issue: [Description] + - Fix: [What was changed] + +## Summary +[Overall summary of changes and improvements] +``` + +### Step 5: Review Async Samples + +For each 
async sample, perform the same review as sync samples, PLUS: + +#### 5.1 Verify Functional Parity with Sync Sample + +Compare async sample with its sync counterpart: + +**Must match:** +- Same operations performed (in same order) +- Same error handling logic +- Same output messages (except file name differences) +- Same business logic and flow + +**Expected differences (acceptable):** +- File name: `Sample##_NameAsync.java` vs `Sample##_Name.java` +- Client type: `ContentUnderstandingAsyncClient` vs `ContentUnderstandingClient` +- Reactive patterns: `Mono`/`Flux`, `block()`, `subscribe()`, etc. +- Output file name in output: `Sample##_NameAsync.out.txt` vs `Sample##_Name.out.txt` + +**Must NOT differ:** +- Core functionality +- Error handling approach +- Output content (except file names) +- Business logic + +#### 5.2 Verify Output Matches Sync Output + +Compare async output with sync output: + +1. **Same operations**: Verify same operations are performed +2. **Same results**: Output values should match (except file names) +3. **Same error handling**: Error messages should match +4. 
**Reactive patterns**: Verify proper use of reactive operators + +**Common async-specific issues:** +- Missing `block()` calls causing incomplete execution +- Incorrect reactive operator usage +- Missing error handling in reactive chains +- Output order differences (acceptable if due to async nature) + +#### 5.3 Make Changes + +After identifying issues: +- Fix functional parity problems +- Ensure output matches sync sample +- Fix reactive pattern issues +- Update comments to match sync sample + +#### 5.4 Update Review Summary + +If async sample had changes, update the corresponding review summary file (same file as sync sample, or create new one if sync had no changes): + +```markdown +## Async Sample Review: Sample##_NameAsync + +**File**: `src/samples/java/com/azure/ai/contentunderstanding/samples/Sample##_NameAsync.java` + +### Functional Parity Issues +- [ ] Issue: [Description] + - Fix: [What was changed] + +### Output Verification +- [ ] Issue: [Description] + - Fix: [What was changed] + +### Reactive Pattern Issues +- [ ] Issue: [Description] + - Fix: [What was changed] +``` + +## Review Checklist + +For each sample group, verify: + +### Sync Sample +- [ ] Code is correct and handles edge cases +- [ ] Code is clear and readable +- [ ] Output file matches code behavior +- [ ] All expected output is present +- [ ] No unexpected errors or warnings +- [ ] Class-level JavaDoc is accurate +- [ ] Inline comments are accurate and helpful + +### Async Sample +- [ ] Functional parity with sync sample (100% match) +- [ ] Output matches sync sample output (except file names) +- [ ] Reactive patterns are correct +- [ ] Error handling matches sync sample +- [ ] Comments match sync sample + +## Output Files + +All review summaries are saved to: +- **Directory**: `target/sample_reviews/` +- **Format**: `SampleReview_YYYYMMDD_HHMMSS.md` +- **Content**: Detailed review findings and changes made + +## Related Skills + +- **`run-all-samples`**: Generates sample output files. 
Automatically invoked if outputs are missing. +- **`compile-cu-sdk-in-place`**: Compiles CU SDK. Required before running samples. +- **`create-cu-async-sample`**: Creates async samples. Useful reference for async patterns. + +## Example Workflow + +``` +1. Check git status → Clean ✓ +2. Enumerate samples → Found 17 groups +3. Check outputs → 12 missing, 5 exist +4. Run run-all-samples → Generated 12 outputs +5. Review Sample00_UpdateDefaults (sync) + - Code review ✓ + - Output verification ✓ + - Made 2 fixes + - Created SampleReview_20260126_143022.md +6. Review Sample00_UpdateDefaultsAsync (async) + - Functional parity check ✓ + - Output comparison ✓ + - Made 1 fix + - Updated SampleReview_20260126_143022.md +7. Continue with remaining samples... +``` + +## Notes + +- **Stop on errors**: If git status shows changes, stop immediately +- **Incremental reviews**: Review summaries are created per sample, allowing incremental progress +- **Output preservation**: Existing outputs are preserved unless explicitly regenerated +- **Change tracking**: All changes are documented in review summary files diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/SKILL.md new file mode 100644 index 000000000000..2afe0d8d8c8b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/SKILL.md @@ -0,0 +1,85 @@ +--- +name: sdk-compile +description: | + Compile Azure SDK source code. + + This skill helps you: + - Build SDK modules locally + - Verify compilation before testing + - Resolve dependency issues + + Supported build systems: Maven (Java), pip (Python), dotnet (C#), npm (JavaScript) + + Trigger phrases: "compile sdk", "build project", "maven compile" +--- + +# SDK Compile + +This skill compiles Azure SDK source code for local development and testing. + +## 🎯 What This Skill Does + +1. 
Detects the SDK language and build system +2. Compiles source code with appropriate flags +3. Reports compilation errors with context + +## 📋 Pre-requisites + +- [ ] SDK source code checked out +- [ ] Build tools installed (Maven/pip/dotnet/npm) +- [ ] JDK 8+ (for Java) + +## 🔧 Usage + +### Java (Maven) + +```bash +# Navigate to SDK module +cd sdk/{service}/azure-{service} + +# Compile the module +mvn compile -DskipTests +``` + +### .NET (dotnet) + +```bash +# Navigate to SDK module +cd sdk/{service}/{module} + +# Build +dotnet build +``` + +## 📦 Java-Specific Notes + +### Compile Single Module (Recommended) + +```bash +# From repo root +mvn compile -pl sdk/contentunderstanding/azure-ai-contentunderstanding -am +``` + +### Common Maven Flags + +```bash +# Install to local repo first +mvn install -DskipTests -pl sdk/core/azure-core +``` + +### Checkstyle Errors + +Fix code style issues instead of disabling Checkstyle rules. + +### SpotBugs Warnings + +Address warnings instead of suppressing them. 
+ +## 🌐 Cross-Language Commands + +| Language | Compile Command | Notes | +|----------|----------------|-------| +| Java | `mvn compile` | Requires JDK 8+ | +| Python | `pip install -e .` | Creates editable install | +| .NET | `dotnet build` | Requires .NET SDK | +| JavaScript | `npm run build` | Check package.json for script | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/scripts/compile.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/scripts/compile.sh new file mode 100644 index 000000000000..c959211696b8 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-compile/scripts/compile.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Compile Azure SDK module +# Usage: ./compile.sh [module-path] + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +MODULE_PATH="${1:-.}" +cd "$MODULE_PATH" + +echo -e "${YELLOW}Detecting build system...${NC}" + +# Detect build system and compile +if [ -f "pom.xml" ]; then + echo -e "${GREEN}Found pom.xml - Using Maven${NC}" + mvn compile -f pom.xml -DskipTests + +elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then + echo -e "${GREEN}Found Python project - Using pip${NC}" + pip install -e . 
+ +elif ls *.csproj >/dev/null 2>&1; then + echo -e "${GREEN}Found .csproj - Using dotnet${NC}" + dotnet build + +elif [ -f "package.json" ]; then + echo -e "${GREEN}Found package.json - Using npm${NC}" + npm run build + +else + echo -e "${RED}No supported build system found${NC}" + echo "Supported: pom.xml (Maven), setup.py/pyproject.toml (Python), *.csproj (.NET), package.json (npm)" + exit 1 +fi + +echo -e "${GREEN}Compilation completed successfully!${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/SKILL.md new file mode 100644 index 000000000000..7ce65aa2fcbc --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/SKILL.md @@ -0,0 +1,122 @@ +--- +name: sdk-push-recordings +description: | + Push session recordings to Azure SDK Assets repository. + + This skill helps you: + - Push new test recordings after RECORD mode + - Update assets.json with new tag + - Manage session records in external repo + + IMPORTANT: Run after successful RECORD mode tests. + + Trigger phrases: "push recordings", "push assets", "update session records" +--- + +# SDK Push Recordings + +This skill pushes session recordings to the Azure SDK Assets repository after RECORD mode testing. + +## 🎯 What This Skill Does + +1. Validates local session recordings +2. Pushes recordings to Azure SDK Assets repo +3. Updates `assets.json` with new tag +4. 
Commits the updated `assets.json` (optional) + +## 📋 Pre-requisites + +- [ ] RECORD mode tests completed successfully +- [ ] Session recordings exist in `.assets` directory +- [ ] Git credentials configured for Azure SDK Assets repo +- [ ] `assets.json` file present in module directory + +## 🔧 Usage + +### Push Recordings + +```bash +# Check new tag in assets.json +cat assets.json + +# Example output: +# { +# "AssetsRepo": "Azure/azure-sdk-assets", +# "AssetsRepoPrefixPath": "java", +# "TagPrefix": "java/contentunderstanding/azure-ai-contentunderstanding", +# "Tag": "java/contentunderstanding/azure-ai-contentunderstanding_abc123" +# } +``` + +## 📦 Assets Repository + +### Repository Location + +- **Main repo**: `Azure/azure-sdk-assets` +- **URL**: + +### Tag Format + +``` +{language}/{service}/{module}_{commit-hash} +``` + +Example: `java/contentunderstanding/azure-ai-contentunderstanding_7c2854bb8e` + +## ⚠️ Important Notes + +### Git Credentials + +- Be patient during upload +- Check network connection +- Verify disk space in `.assets` + +### After Push + +```bash +# Check Git credentials +git credential-manager get + +# Or use HTTPS with PAT +export GIT_ASKPASS=/path/to/credential-helper +``` + +### Push Failed - No Changes + +If no recordings changed, push will succeed but tag won't update. + +### Missing .assets Directory + +```bash +# Run RECORD mode first +mvn test -DAZURE_TEST_MODE=RECORD + +# Then push +test-proxy push -a assets.json +``` + +## 🌐 Cross-Language Workflow + +The push workflow is the same for all languages: + +```bash +# 1. Run RECORD mode tests +# (language-specific command) + +# 2. Push recordings (universal) +test-proxy push -a assets.json + +# 3. Verify PLAYBACK mode +# (language-specific command) + +# 4. 
Commit assets.json +git add assets.json +git commit -m "Update session recordings" +``` + +## ✅ Post-Push Checklist + +- [ ] New tag visible in `assets.json` +- [ ] PLAYBACK tests pass with new recordings +- [ ] `assets.json` committed to your branch +- [ ] PR updated with recording changes diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/scripts/push-recordings.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/scripts/push-recordings.sh new file mode 100644 index 000000000000..7f8756ee5fcf --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-push-recordings/scripts/push-recordings.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Push session recordings to Azure SDK Assets repository +# Usage: ./push-recordings.sh + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +echo -e "${YELLOW}Pushing session recordings to Azure SDK Assets repo...${NC}" + +# Check for assets.json +if [ ! -f "assets.json" ]; then + echo -e "${RED}Error: assets.json not found${NC}" + echo "This file is required to push recordings." + exit 1 +fi + +# Show current tag +CURRENT_TAG=$(cat assets.json | grep -o '"Tag"[[:space:]]*:[[:space:]]*"[^"]*"' | cut -d'"' -f4) +echo -e "${YELLOW}Current tag: $CURRENT_TAG${NC}" + +# Push recordings +echo -e "${YELLOW}Pushing recordings...${NC}" +test-proxy push -a assets.json + +# Show new tag +NEW_TAG=$(cat assets.json | grep -o '"Tag"[[:space:]]*:[[:space:]]*"[^"]*"' | cut -d'"' -f4) +echo "" +echo -e "${GREEN}Push completed!${NC}" +echo -e "New tag: ${GREEN}$NEW_TAG${NC}" + +if [ "$CURRENT_TAG" == "$NEW_TAG" ]; then + echo -e "${YELLOW}Note: Tag unchanged (no new recordings)${NC}" +else + echo "" + echo -e "${YELLOW}Next steps:${NC}" + echo "1. Verify PLAYBACK tests: mvn test -DAZURE_TEST_MODE=PLAYBACK" + echo "2. 
Commit assets.json: git add assets.json && git commit -m 'Update session recordings'" +fi diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/SKILL.md new file mode 100644 index 000000000000..e4098f410f86 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/SKILL.md @@ -0,0 +1,88 @@ +--- +name: sdk-run-all-samples +description: | + Run all Azure SDK samples in sequence. + + This skill helps you: + - Execute all samples for validation + - Verify sample code works correctly + - Batch test sample implementations + + Trigger phrases: "run all samples", "execute all samples", "test all samples" +--- + +# SDK Run All Samples + +This skill runs all Azure SDK samples in a module for comprehensive validation. + +## 🎯 What This Skill Does + +1. Discovers all sample files in the SDK module +2. Compiles samples if needed +3. Executes each sample in sequence +4. 
Reports overall results + +## 📋 Pre-requisites + +- [ ] SDK module compiled successfully +- [ ] Environment variables configured (for live samples) +- [ ] Session recordings restored (for PLAYBACK mode) + +## 🔧 Usage + +### Java (Maven) - PLAYBACK Mode + +```bash +# Run all samples against recorded sessions +mvn test -Dtest="Sample*" -DAZURE_TEST_MODE=PLAYBACK +``` + +### .NET + +```bash +cd sdk/{service}/Azure.{Service}/samples +dotnet test +``` + +### Discovering Samples (Java) + +```bash +# Find all sample test classes +find src/samples -name "Sample*.java" -exec basename {} .java \; +``` + +### Resource Cleanup + +- Check sample for cleanup code +- Manually delete test resources if needed + +### Timeout Handling + +Long-running samples may timeout: + +```bash +# Increase Maven timeout +mvn test -Dtest="Sample*" -Dsurefire.timeout=600 +``` + +## 🔍 Troubleshooting + +### Some Samples Skipped + +Check `@Disabled` annotations or conditional execution. + +### Environment Variables Missing + +```bash +# Load from .env file +source .github/skills/sdk-setup-env/scripts/load-env.sh +``` + +## 🌐 Cross-Language Commands + +| Language | Command | Notes | +|----------|---------|-------| +| Java | `mvn test -Dtest="Sample*"` | Wildcard pattern | +| Python | `pytest samples/` | pytest discovers tests | +| .NET | `dotnet test samples/` | Test all sample projects | +| JavaScript | `npm run samples` | Check package.json | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/scripts/run-all-samples.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/scripts/run-all-samples.sh new file mode 100644 index 000000000000..57ad56ba3b4e --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-all-samples/scripts/run-all-samples.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Run all samples in the SDK module +# Usage: ./run-all-samples.sh [test-mode] + +set -e + +# Colors 
+RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +TEST_MODE="${1:-PLAYBACK}" + +echo -e "${YELLOW}Running ALL samples in $TEST_MODE mode${NC}" + +# Check for pom.xml +if [ ! -f "pom.xml" ]; then + echo -e "${RED}Error: pom.xml not found. Run from SDK module directory.${NC}" + exit 1 +fi + +# Restore recordings for PLAYBACK mode +if [ "$TEST_MODE" == "PLAYBACK" ] && [ -f "assets.json" ]; then + echo -e "${YELLOW}Restoring session recordings...${NC}" + test-proxy restore -a assets.json 2>/dev/null || true +fi + +# List samples that will be run +echo -e "${YELLOW}Discovering samples...${NC}" +SAMPLES=$(find src/samples -name "Sample*.java" 2>/dev/null | wc -l || echo "0") +echo -e "Found ${GREEN}$SAMPLES${NC} sample files" + +# Run all samples +echo -e "${GREEN}Running Maven tests for all samples...${NC}" +mvn test -Dtest="Sample*" -DAZURE_TEST_MODE="$TEST_MODE" -f pom.xml + +echo "" +echo -e "${GREEN}All samples completed!${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/SKILL.md new file mode 100644 index 000000000000..f3b5cbd8694b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/SKILL.md @@ -0,0 +1,79 @@ +--- +name: sdk-run-sample +description: | + Run a single Azure SDK sample. + + This skill helps you: + - Execute individual SDK samples + - Test sample code functionality + - Debug sample implementations + + Trigger phrases: "run sample", "execute sample", "test sample code" +--- + +# SDK Run Sample + +This skill runs individual Azure SDK samples for testing and demonstration. + +## 🎯 What This Skill Does + +1. Identifies sample files in the SDK module +2. Compiles the sample if needed +3. Executes the sample with proper configuration +4. 
Reports execution results + +## 📋 Pre-requisites + +- [ ] SDK module compiled successfully +- [ ] Environment variables configured (for live samples) +- [ ] Sample file exists in module + +## 🔧 Usage + +### Java (Maven) + +```bash +cd sdk/{service}/azure-{service}/samples +python sample_basic_operations.py +``` + +### .NET + +```bash +cd sdk/{service}/{module}/samples +npx ts-node sample_basic_operations.ts +``` + +## 📦 Sample Locations + +| Language | Location | Pattern | +|----------|----------|---------| +| Java | `src/samples/java/` | `Sample*.java` | +| Python | `samples/` | `sample_*.py` | +| .NET | `samples/` | `Sample*.cs` | +| JavaScript | `samples/` | `*.ts` or `*.js` | + +## ⚠️ Sample Types + +### Live Samples (Require Credentials) + +- Use pre-recorded responses +- No credentials needed +- Fast and repeatable + +## 🔍 Finding Samples + +### Java + +```bash +ls samples/sample_*.py +``` + +## 🌐 Cross-Language Commands + +| Language | Command | Notes | +|----------|---------|-------| +| Java | `mvn test -Dtest={SampleClass}` | Samples are test classes | +| Python | `python samples/{sample}.py` | Direct execution | +| .NET | `dotnet run --project samples/{sample}` | Project-based | +| JavaScript | `npx ts-node samples/{sample}.ts` | TypeScript | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/scripts/run-sample.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/scripts/run-sample.sh new file mode 100644 index 000000000000..20fffa895c69 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-run-sample/scripts/run-sample.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Run sample by name or pattern +# Usage: ./run-sample.sh [sample-name-or-pattern] + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +SAMPLE_PATTERN="${1:-Sample*}" +TEST_MODE="${AZURE_TEST_MODE:-PLAYBACK}" + +echo -e 
"${YELLOW}Running sample(s): $SAMPLE_PATTERN${NC}" +echo -e "${YELLOW}Test mode: $TEST_MODE${NC}" + +# Check for pom.xml +if [ ! -f "pom.xml" ]; then + echo -e "${RED}Error: pom.xml not found. Run from SDK module directory.${NC}" + exit 1 +fi + +# Restore recordings for PLAYBACK mode +if [ "$TEST_MODE" == "PLAYBACK" ] && [ -f "assets.json" ]; then + echo -e "${YELLOW}Restoring session recordings...${NC}" + test-proxy restore -a assets.json 2>/dev/null || true +fi + +# Run sample +echo -e "${GREEN}Running Maven test...${NC}" +mvn test -Dtest="$SAMPLE_PATTERN" -DAZURE_TEST_MODE="$TEST_MODE" -f pom.xml + +echo "" +echo -e "${GREEN}Sample execution completed!${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/SKILL.md new file mode 100644 index 000000000000..8b1ca3583df8 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/SKILL.md @@ -0,0 +1,63 @@ +--- +name: sdk-setup-env +description: | + Load environment variables from .env file for Azure SDK development. + + This skill helps you: + - Find .env files in your project + - Load environment variables into your shell session + - Validate required variables for testing + + Trigger phrases: "load env", "setup environment", "configure SDK" +--- + +# SDK Environment Setup + +This skill loads environment variables from `.env` files for Azure SDK development and testing. + +## 🎯 What This Skill Does + +1. Locates `.env` files in your workspace +2. Loads environment variables into the current shell session +3. 
Validates required variables for SDK testing + +## 📋 Pre-requisites + +- [ ] `.env` file exists in the SDK module directory +- [ ] Required Azure credentials are configured + +## 🔧 Usage + +### Quick Start (PowerShell) + +```powershell +# Navigate to SDK module directory +cd sdk\{service}\{module} + +# Load environment variables +. .github\skills\sdk-setup-env\scripts\load-env.ps1 +``` + +## 📦 Required Environment Variables + +### Common Variables (All Services) + +| Variable | Description | +|----------|-------------| +| `CONTENT_UNDERSTANDING_ENDPOINT` | Service endpoint URL | +| `CONTENT_UNDERSTANDING_KEY` | Service key (optional if using AAD) | + +## ⚠️ Security Notes + +- Never commit `.env` files to version control +- Ensure `.gitignore` includes `.env` +- Use Azure Key Vault for production secrets + +## 🌐 Cross-Language Support + +| Language | Script | Notes | +|----------|--------|-------| +| Java | `load-env.sh` | Export vars before Maven | +| Python | `load-env.sh` | python-dotenv also works | +| .NET | `load-env.ps1` | launchSettings.json alternative | +| JavaScript | `load-env.sh` | dotenv package alternative | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.ps1 b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.ps1 new file mode 100644 index 000000000000..1754bc0d807f --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.ps1 @@ -0,0 +1,58 @@ +# Load environment variables from .env file +# Usage: . 
.\load-env.ps1 [path\to\.env] + +param( + [string]$EnvFile = ".env" +) + +function Write-ColorOutput { + param([string]$Message, [string]$Color = "White") + Write-Host $Message -ForegroundColor $Color +} + +# Find .env file +if (-not (Test-Path $EnvFile)) { + # Try to find .env in parent directories + $dir = Get-Location + while ($dir -ne $null) { + $testPath = Join-Path $dir ".env" + if (Test-Path $testPath) { + $EnvFile = $testPath + break + } + $dir = Split-Path $dir -Parent + } +} + +if (-not (Test-Path $EnvFile)) { + Write-ColorOutput "Error: .env file not found" "Red" + Write-ColorOutput "Create a .env file with your Azure credentials" + exit 1 +} + +Write-ColorOutput "Loading environment from: $EnvFile" "Yellow" + +# Load variables +Get-Content $EnvFile | ForEach-Object { + $line = $_.Trim() + + # Skip comments and empty lines + if ($line -match "^#" -or [string]::IsNullOrEmpty($line)) { + return + } + + # Parse key=value + if ($line -match "^([^=]+)=(.*)$") { + $key = $matches[1].Trim() + $value = $matches[2].Trim() + + # Remove surrounding quotes + $value = $value -replace '^["'']|["'']$', '' + + # Set environment variable + [Environment]::SetEnvironmentVariable($key, $value, "Process") + Write-ColorOutput "✓ Loaded: $key" "Green" + } +} + +Write-ColorOutput "Environment loaded successfully!" 
"Green" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.sh new file mode 100644 index 000000000000..6d25e4de4f66 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-setup-env/scripts/load-env.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Load environment variables from .env file +# Usage: source load-env.sh [path/to/.env] + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Find .env file +ENV_FILE="${1:-.env}" + +if [ ! -f "$ENV_FILE" ]; then + # Try to find .env in parent directories + DIR=$(pwd) + while [ "$DIR" != "/" ]; do + if [ -f "$DIR/.env" ]; then + ENV_FILE="$DIR/.env" + break + fi + DIR=$(dirname "$DIR") + done +fi + +if [ ! -f "$ENV_FILE" ]; then + echo -e "${RED}Error: .env file not found${NC}" + echo "Create a .env file with your Azure credentials" + exit 1 +fi + +echo -e "${YELLOW}Loading environment from: $ENV_FILE${NC}" + +# Load variables +while IFS='=' read -r key value; do + # Skip comments and empty lines + [[ $key =~ ^[[:space:]]*# ]] && continue + [[ -z $key ]] && continue + + # Remove surrounding quotes from value + value="${value%\"}" + value="${value#\"}" + value="${value%\'}" + value="${value#\'}" + + # Export variable + export "$key=$value" + echo -e "${GREEN}✓${NC} Loaded: $key" +done < "$ENV_FILE" + +echo -e "${GREEN}Environment loaded successfully!${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/SKILL.md new file mode 100644 index 000000000000..ee72d087d33f --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/SKILL.md @@ -0,0 +1,102 @@ +--- +name: 
sdk-test-playback +description: | + Run Azure SDK tests in PLAYBACK mode using recorded API responses. + + This skill helps you: + - Run tests offline without Azure credentials + - Verify SDK behavior against recorded responses + - CI/CD testing without live service access + + No Azure credentials required - uses previously recorded sessions. + + Trigger phrases: "playback tests", "PLAYBACK mode", "run offline tests" +--- + +# SDK Test Playback + +This skill runs Azure SDK tests in PLAYBACK mode using previously recorded API responses. + +## 🎯 What This Skill Does + +1. Restores session records from Azure SDK Assets repo +2. Starts test proxy in PLAYBACK mode +3. Runs tests using recorded HTTP responses +4. Reports test results + +## 📋 Pre-requisites + +- [ ] `assets.json` file present in module directory +- [ ] Test proxy installed (`test-proxy` command available) +- [ ] Session records available (run RECORD mode first if missing) + +## 🔧 Usage + +### Step 1: Restore Recordings + +```bash +# Run all tests in PLAYBACK mode +mvn test -DAZURE_TEST_MODE=PLAYBACK + +# Run specific test class +mvn test -DAZURE_TEST_MODE=PLAYBACK -Dtest=Sample01* +``` + +#### Python (pytest) + +```bash +dotnet test /p:TestMode=Playback +``` + +#### JavaScript (npm) + +```bash +# Restore recordings to local .assets directory +test-proxy restore -a assets.json +``` + +### Check Assets Tag + + ```bash + mvn test -DAZURE_TEST_MODE=RECORD -Dtest=FailingTestClass + ``` + +2. **Check test data**: Ensure test uses same input as recorded session + +3. 
**Restore assets**: Run `test-proxy restore -a assets.json` + +### Test Proxy Not Running + +```bash +# Record fresh session +mvn test -DAZURE_TEST_MODE=RECORD + +# Push new recordings +test-proxy push -a assets.json +``` + +## 🔍 Debugging Tips + +### Verbose Test Proxy Output + +```bash +# List recording files +ls -la .assets/*/ +``` + +## 🌐 Cross-Language Test Mode + +| Language | Environment Variable | Command Flag | +|----------|---------------------|--------------| +| Java | `AZURE_TEST_MODE=PLAYBACK` | `-DAZURE_TEST_MODE=PLAYBACK` | +| Python | `AZURE_TEST_MODE=PLAYBACK` | `--azure-test-mode=playback` | +| .NET | `AZURE_TEST_MODE=Playback` | `/p:TestMode=Playback` | +| JavaScript | `AZURE_TEST_MODE=playback` | `--test-mode=playback` | + +## ✅ Benefits of PLAYBACK Mode + +1. **No Azure credentials needed** - Tests run offline +2. **Fast execution** - No network latency +3. **Deterministic** - Same results every time +4. **CI/CD friendly** - No service dependencies +5. **Cost-free** - No Azure resource consumption diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/scripts/test-playback.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/scripts/test-playback.sh new file mode 100644 index 000000000000..a54a3d20cf80 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-playback/scripts/test-playback.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +# Run Azure SDK tests in PLAYBACK mode +# Usage: ./test-playback.sh [test-class-pattern] + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +TEST_PATTERN="${1:-}" + +echo -e "${YELLOW}Running tests in PLAYBACK mode...${NC}" + +# Restore recordings first +if [ -f "assets.json" ]; then + echo -e "${YELLOW}Restoring session recordings...${NC}" + test-proxy restore -a assets.json +else + echo -e "${RED}Error: assets.json not found${NC}" + echo 
"PLAYBACK mode requires recorded sessions. Run RECORD mode first." + exit 1 +fi + +# Detect build system and run tests +if [ -f "pom.xml" ]; then + echo -e "${GREEN}Running Maven tests in PLAYBACK mode${NC}" + + if [ -n "$TEST_PATTERN" ]; then + mvn test -DAZURE_TEST_MODE=PLAYBACK -Dtest="$TEST_PATTERN" -f pom.xml + else + mvn test -DAZURE_TEST_MODE=PLAYBACK -f pom.xml + fi + +elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then + echo -e "${GREEN}Running pytest in playback mode${NC}" + + if [ -n "$TEST_PATTERN" ]; then + pytest --azure-test-mode=playback -k "$TEST_PATTERN" + else + pytest --azure-test-mode=playback + fi + +elif [ -f "*.csproj" ] 2>/dev/null; then + echo -e "${GREEN}Running dotnet tests in Playback mode${NC}" + + if [ -n "$TEST_PATTERN" ]; then + dotnet test /p:TestMode=Playback --filter "$TEST_PATTERN" + else + dotnet test /p:TestMode=Playback + fi + +elif [ -f "package.json" ]; then + echo -e "${GREEN}Running npm tests in playback mode${NC}" + + export AZURE_TEST_MODE=playback + npm test + +else + echo -e "${RED}No supported build system found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}PLAYBACK mode tests completed!${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/SKILL.md new file mode 100644 index 000000000000..e27e45c153c2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/SKILL.md @@ -0,0 +1,86 @@ +--- +name: sdk-test-record +description: | + Run Azure SDK tests in RECORD mode to capture live API responses. + + This skill helps you: + - Record new test sessions with live Azure services + - Update existing recordings when APIs change + - Generate session record files for playback testing + + IMPORTANT: Requires Azure credentials and live service access. 
+ + Trigger phrases: "record tests", "RECORD mode", "capture test recordings" +--- + +# SDK Test Record + +This skill runs Azure SDK tests in RECORD mode to capture live API responses for playback testing. + +## 🎯 What This Skill Does + +1. Starts the test proxy in RECORD mode +2. Runs tests against live Azure services +3. Captures HTTP request/response pairs +4. Saves session records to `.assets` directory + +## 📋 Pre-requisites + +- [ ] Azure credentials configured (via `.env` or environment) +- [ ] Test proxy installed (`test-proxy` command available) +- [ ] `assets.json` file present in module directory +- [ ] Live Azure service endpoint accessible + +## 🔧 Usage + +### Java (Maven) + +```bash +cd sdk/{service}/azure-{service} +mvn test -DAZURE_TEST_MODE=RECORD +``` + +### .NET (dotnet) + +```bash +cd sdk/{service}/{module} +dotnet test /p:TestMode=Record +``` + +## 📦 Test Proxy Commands + +### Push New Recordings + +```bash +# After RECORD mode, push new recordings +test-proxy push -a assets.json +``` + +## ⚠️ Important Notes + +### Recording Requirements + +- API keys and tokens +- Subscription IDs +- Client secrets + +### Check Required Variables + +```bash +# Check required variables +echo $CONTENT_UNDERSTANDING_ENDPOINT +echo $AZURE_CLIENT_ID +``` + +### Overwriting Existing Recordings + +Tests will overwrite existing recordings. Use `test-proxy restore` first if you want to preserve them. 
+ +## 🌐 Cross-Language Test Mode + +| Language | Environment Variable | Command Flag | +|----------|---------------------|--------------| +| Java | `AZURE_TEST_MODE=RECORD` | `-DAZURE_TEST_MODE=RECORD` | +| Python | `AZURE_TEST_MODE=RECORD` | `--azure-test-mode=record` | +| .NET | `AZURE_TEST_MODE=Record` | `/p:TestMode=Record` | +| JavaScript | `AZURE_TEST_MODE=record` | `--test-mode=record` | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/scripts/test-record.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/scripts/test-record.sh new file mode 100644 index 000000000000..d59d73ea0417 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-test-record/scripts/test-record.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Run Azure SDK tests in RECORD mode +# Usage: ./test-record.sh [test-class-pattern] + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +TEST_PATTERN="${1:-}" + +echo -e "${YELLOW}Running tests in RECORD mode...${NC}" +echo -e "${YELLOW}WARNING: This will connect to live Azure services${NC}" + +# Check for Azure credentials +if [ -z "$AZURE_CLIENT_ID" ] && [ -z "$CONTENT_UNDERSTANDING_KEY" ]; then + echo -e "${YELLOW}Tip: Run 'source sdk-setup-env/scripts/load-env.sh' to load credentials${NC}" +fi + +# Restore existing recordings first +if [ -f "assets.json" ]; then + echo -e "${YELLOW}Restoring existing recordings...${NC}" + test-proxy restore -a assets.json 2>/dev/null || true +fi + +# Detect build system and run tests +if [ -f "pom.xml" ]; then + echo -e "${GREEN}Running Maven tests in RECORD mode${NC}" + + if [ -n "$TEST_PATTERN" ]; then + mvn test -DAZURE_TEST_MODE=RECORD -Dtest="$TEST_PATTERN" -f pom.xml + else + mvn test -DAZURE_TEST_MODE=RECORD -f pom.xml + fi + +elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then + echo -e "${GREEN}Running pytest in record mode${NC}" + + 
if [ -n "$TEST_PATTERN" ]; then + pytest --azure-test-mode=record -k "$TEST_PATTERN" + else + pytest --azure-test-mode=record + fi + +elif [ -f "*.csproj" ] 2>/dev/null; then + echo -e "${GREEN}Running dotnet tests in Record mode${NC}" + + if [ -n "$TEST_PATTERN" ]; then + dotnet test /p:TestMode=Record --filter "$TEST_PATTERN" + else + dotnet test /p:TestMode=Record + fi + +elif [ -f "package.json" ]; then + echo -e "${GREEN}Running npm tests in record mode${NC}" + + export AZURE_TEST_MODE=record + npm test + +else + echo -e "${RED}No supported build system found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}RECORD mode tests completed!${NC}" +echo -e "${YELLOW}Next step: Run 'test-proxy push -a assets.json' to push recordings${NC}" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/SKILL.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/SKILL.md new file mode 100644 index 000000000000..39f6b41aab5f --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/SKILL.md @@ -0,0 +1,127 @@ +--- +name: sdk-workflow-record-push +description: | + Complete workflow to record tests and push recordings to Azure SDK Assets repo. + + This workflow executes: setup-env → compile → test-record → push-recordings → test-playback + + Use when you need to: + - Record new test sessions with live Azure services + - Update existing recordings after API changes + - Complete the full RECORD and PUSH cycle + + Trigger phrases: "record and push", "complete recording workflow", "full test recording cycle" +--- + +# SDK Workflow: Record and Push + +This workflow orchestrates the complete process of recording SDK tests and pushing them to the Azure SDK Assets repository. + +## 🎯 What This Workflow Does + +1. Loads environment variables +2. Compiles the SDK +3. Runs tests in RECORD mode (live Azure services) +4. 
Pushes recordings to assets repo +5. Verifies with PLAYBACK mode + +## 📋 Pre-requisites + +- [ ] `.env` file with Azure credentials +- [ ] `assets.json` file in module directory +- [ ] Network access to Azure services +- [ ] Git credentials for Azure SDK Assets repo + +## 🔄 Workflow Steps + +Execute these steps in order. Stop if any step fails. + +### Step 1: Load Environment ➡️ `sdk-setup-env` + +```bash +# Load credentials from .env file +source .github/skills/sdk-setup-env/scripts/load-env.sh +``` + +**Checkpoint:** Verify `CONTENT_UNDERSTANDING_ENDPOINT` and credentials are set. + +--- + +### Step 2: Compile SDK ➡️ `sdk-compile` + +```bash +# Compile the SDK module +mvn compile -f pom.xml -DskipTests +``` + +**Checkpoint:** Build should succeed with no errors. + +--- + +### Step 3: Run RECORD Mode Tests ➡️ `sdk-test-record` + +```bash +# Run tests against live Azure services +mvn test -DAZURE_TEST_MODE=RECORD +``` + +**Checkpoint:** All tests should pass. Note any skipped tests. + +--- + +### Step 4: Push Recordings ➡️ `sdk-push-recordings` + +```bash +# Push session recordings to Azure SDK Assets repo +test-proxy push -a assets.json +``` + +**Checkpoint:** Note the new tag in `assets.json`. Example: + +``` +java/contentunderstanding/azure-ai-contentunderstanding_abc123 +``` + +--- + +### Step 5: Verify with PLAYBACK ➡️ `sdk-test-playback` + +```bash +# Restore and run with recorded responses +test-proxy restore -a assets.json +mvn test -DAZURE_TEST_MODE=PLAYBACK +``` + +**Checkpoint:** All tests should pass using recorded responses. 
+ +--- + +## ✅ Completion Checklist + +After workflow completes successfully: + +- [ ] All tests passed in RECORD mode +- [ ] Recordings pushed (new tag in `assets.json`) +- [ ] All tests passed in PLAYBACK mode +- [ ] `assets.json` ready to commit + +## ⚠️ Error Recovery + +| Step | Common Error | Resolution | +|------|--------------|------------| +| Step 1 | Missing .env | Create .env with credentials | +| Step 2 | Compile error | Fix code issues | +| Step 3 | Auth failure | Check Azure credentials | +| Step 3 | Test failure | Debug failing test | +| Step 4 | Push failed | Check Git credentials | +| Step 5 | Recording mismatch | Re-run RECORD mode | + +## 🔗 Related Skills + +| Skill | Role in Workflow | +|-------|------------------| +| `sdk-setup-env` | Step 1 - Environment | +| `sdk-compile` | Step 2 - Build | +| `sdk-test-record` | Step 3 - Record | +| `sdk-push-recordings` | Step 4 - Push | +| `sdk-test-playback` | Step 5 - Verify | diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/scripts/run-workflow.sh b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/scripts/run-workflow.sh new file mode 100644 index 000000000000..5c236eb7fcf9 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.github/skills/sdk-workflow-record-push/scripts/run-workflow.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# SDK Workflow: Record and Push +# Complete workflow to record tests and push to assets repo +# Usage: ./run-workflow.sh + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SKILLS_DIR="$(dirname "$SCRIPT_DIR")/.." 
+ +echo -e "${BLUE}╔════════════════════════════════════════════════════════════╗${NC}" +echo -e "${BLUE}║ SDK Workflow: Record and Push ║${NC}" +echo -e "${BLUE}╚════════════════════════════════════════════════════════════╝${NC}" +echo "" + +# Step 1: Load Environment +echo -e "${YELLOW}━━━ Step 1/5: Load Environment ━━━${NC}" +if [ -f "$SKILLS_DIR/sdk-setup-env/scripts/load-env.sh" ]; then + source "$SKILLS_DIR/sdk-setup-env/scripts/load-env.sh" +else + echo -e "${YELLOW}Warning: load-env.sh not found, using existing environment${NC}" +fi +echo -e "${GREEN}✓ Step 1 complete${NC}" +echo "" + +# Step 2: Compile SDK +echo -e "${YELLOW}━━━ Step 2/5: Compile SDK ━━━${NC}" +mvn compile -f pom.xml -DskipTests -q +echo -e "${GREEN}✓ Step 2 complete${NC}" +echo "" + +# Step 3: Run RECORD Mode Tests +echo -e "${YELLOW}━━━ Step 3/5: Run RECORD Mode Tests ━━━${NC}" +echo -e "${YELLOW}⚠ Connecting to live Azure services...${NC}" +mvn test -DAZURE_TEST_MODE=RECORD -f pom.xml +echo -e "${GREEN}✓ Step 3 complete${NC}" +echo "" + +# Step 4: Push Recordings +echo -e "${YELLOW}━━━ Step 4/5: Push Recordings ━━━${NC}" +if [ -f "assets.json" ]; then + OLD_TAG=$(grep -o '"Tag"[[:space:]]*:[[:space:]]*"[^"]*"' assets.json | cut -d'"' -f4) + test-proxy push -a assets.json + NEW_TAG=$(grep -o '"Tag"[[:space:]]*:[[:space:]]*"[^"]*"' assets.json | cut -d'"' -f4) + echo -e "Old tag: $OLD_TAG" + echo -e "New tag: ${GREEN}$NEW_TAG${NC}" +else + echo -e "${RED}Error: assets.json not found${NC}" + exit 1 +fi +echo -e "${GREEN}✓ Step 4 complete${NC}" +echo "" + +# Step 5: Verify with PLAYBACK +echo -e "${YELLOW}━━━ Step 5/5: Verify with PLAYBACK ━━━${NC}" +test-proxy restore -a assets.json +mvn test -DAZURE_TEST_MODE=PLAYBACK -f pom.xml +echo -e "${GREEN}✓ Step 5 complete${NC}" +echo "" + +# Summary +echo -e "${BLUE}╔════════════════════════════════════════════════════════════╗${NC}" +echo -e "${BLUE}║ Workflow Complete! 
║${NC}" +echo -e "${BLUE}╚════════════════════════════════════════════════════════════╝${NC}" +echo "" +echo -e "${GREEN}✓ All tests recorded and verified${NC}" +echo -e "${GREEN}✓ Recordings pushed to assets repo${NC}" +echo -e "${GREEN}✓ New tag: $NEW_TAG${NC}" +echo "" +echo -e "${YELLOW}Next step: Commit assets.json to your branch${NC}" +echo -e " git add assets.json" +echo -e " git commit -m 'Update session recordings'" diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/.gitignore b/sdk/contentunderstanding/azure-ai-contentunderstanding/.gitignore new file mode 100644 index 000000000000..8a72794e8c58 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/.gitignore @@ -0,0 +1,6 @@ +# Local-only files and temporary scripts (not committed to git) +.local_only/ + +# Environment variable files (contain secrets) +.env +**/.env diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/CHANGELOG.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/CHANGELOG.md new file mode 100644 index 000000000000..0bfdea2aaad9 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/CHANGELOG.md @@ -0,0 +1,16 @@ +# Release History + +## 1.0.0-beta.1 (Unreleased) + +- Azure ContentUnderstanding client library for Java. This package contains Microsoft Azure ContentUnderstanding client library. + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes +### Features Added + +- Initial release for the azure-ai-contentunderstanding Java SDK. 
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/README.md b/sdk/contentunderstanding/azure-ai-contentunderstanding/README.md new file mode 100644 index 000000000000..99502bc08914 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/README.md @@ -0,0 +1,466 @@ +# Azure Content Understanding client library for Java + +Azure AI Content Understanding is a multimodal AI service that extracts semantic content from documents, video, audio, and image files. It transforms unstructured content into structured, machine-readable data optimized for retrieval-augmented generation (RAG) and automated workflows. + +Use the client library for Azure AI Content Understanding to: + +* **Extract document content** - Extract text, tables, figures, layout information, and structured markdown from documents (PDF, images with text or hand-written text, Office documents and more) +* **Transcribe and analyze audio** - Convert audio content into searchable transcripts with speaker diarization and timing information +* **Analyze video content** - Extract visual frames, transcribe audio tracks, and generate structured summaries from video files +* **Leverage prebuilt analyzers** - Use production-ready prebuilt analyzers across industries including finance and tax (invoices, receipts, tax forms), identity verification (passports, driver's licenses), mortgage and lending (loan applications, appraisals), procurement and contracts (purchase orders, agreements), and utilities (billing statements) +* **Create custom analyzers** - Build domain-specific analyzers for specialized content extraction needs across all four modalities (documents, video, audio, and images) +* **Classify documents and video** - Automatically categorize and extract information from documents and video by type + +[Source code][source_code] | [Package (Maven)][package_maven] | [API reference documentation][api_reference_docs] | [Product documentation][product_docs] + +## Getting started 
+ +### Prerequisites + +- [Java Development Kit (JDK)][jdk] with version 8 or above +- [Azure subscription][azure_subscription] +- A **Microsoft Foundry resource** to use this package + +### Configuring Microsoft Foundry resource + +Before using the Content Understanding SDK, you need to set up a Microsoft Foundry resource and deploy the required large language models. Content Understanding currently uses OpenAI GPT models (such as gpt-4.1, gpt-4.1-mini, and text-embedding-3-large). + +#### Step 1: Create Microsoft Foundry resource + +> **Important:** You must create your Microsoft Foundry resource in a region that supports Content Understanding. For a list of available regions, see [Azure Content Understanding region and language support][cu_region_support]. + +1. Follow the steps in the [Azure Content Understanding quickstart][cu_quickstart] to create a Microsoft Foundry resource in the Azure portal +2. Get your Foundry resource's endpoint URL from Azure Portal: + - Go to [Azure Portal][azure_portal] + - Navigate to your Microsoft Foundry resource + - Go to **Resource Management** > **Keys and Endpoint** + - Copy the **Endpoint** URL (typically `https://.services.ai.azure.com/`) + +**Important: Grant Required Permissions** + +After creating your Microsoft Foundry resource, you must grant yourself the **Cognitive Services User** role to enable API calls for setting default model deployments: + +1. Go to [Azure Portal][azure_portal] +2. Navigate to your Microsoft Foundry resource +3. Go to **Access Control (IAM)** in the left menu +4. Click **Add** > **Add role assignment** +5. Select the **Cognitive Services User** role +6. Assign it to yourself (or the user/service principal that will run the application) + +> **Note:** This role assignment is required even if you are the owner of the resource. Without this role, you will not be able to call the Content Understanding API to configure model deployments for prebuilt analyzers and custom analyzers. 
+ +#### Step 2: Deploy required models + +**Important:** The prebuilt and custom analyzers require large language model deployments. You must deploy at least these models before using prebuilt analyzers and custom analyzers: +- `prebuilt-documentSearch`, `prebuilt-imageSearch`, `prebuilt-audioSearch`, `prebuilt-videoSearch` require **gpt-4.1-mini** and **text-embedding-3-large** +- Other prebuilt analyzers like `prebuilt-invoice`, `prebuilt-receipt` require **gpt-4.1** and **text-embedding-3-large** + +To deploy a model: + +1. In Microsoft Foundry, go to **Deployments** > **Deploy model** > **Deploy base model** +2. Search for and select the model you want to deploy. Currently, prebuilt analyzers require models such as `gpt-4.1`, `gpt-4.1-mini`, and `text-embedding-3-large` +3. Complete the deployment with your preferred settings +4. Note the deployment name you chose (by convention, use the model name as the deployment name, e.g., `gpt-4.1` for the `gpt-4.1` model). You can use any deployment name you prefer, but you'll need to note it for use in Step 3 when configuring model deployments. + +Repeat this process for each model required by your prebuilt analyzers. + +For more information on deploying models, see [Create model deployments in Microsoft Foundry portal][deploy_models_docs]. + +#### Step 3: Configure model deployments (required for prebuilt analyzers) + +> **IMPORTANT:** This is a **one-time setup per Microsoft Foundry resource** that maps your deployed models to those required by the prebuilt analyzers and custom models. If you have multiple Microsoft Foundry resources, you need to configure each one separately. + +You need to configure the default model mappings in your Microsoft Foundry resource. This can be done programmatically using the SDK. The configuration maps your deployed models (e.g., `gpt-4.1`, `gpt-4.1-mini`, `text-embedding-3-large`) to the large language models required by prebuilt analyzers. 
+ +To configure model deployments using code, see [Sample00_UpdateDefaults][sample00_update_defaults] for a complete example. The sample shows how to: +- Map your deployed models to the models required by prebuilt analyzers +- Retrieve the current default model deployment configuration +- Update the configuration with your model deployment mappings +- Verify the updated configuration + +The following shows how to set up the environment to run this sample successfully: + +**3-1. Set environment variables** + +The environment variables define your Microsoft Foundry resource endpoint and the deployment names for the models you deployed in Step 2. **Important:** The deployment name values (e.g., `gpt-4.1`, `gpt-4.1-mini`, `text-embedding-3-large`) must exactly match the deployment names you chose when deploying models in Step 2. + +**On Linux/macOS (bash):** +```bash +export CONTENTUNDERSTANDING_ENDPOINT="https://.services.ai.azure.com/" +export CONTENTUNDERSTANDING_KEY="" # Optional if using DefaultAzureCredential +export GPT_4_1_DEPLOYMENT="gpt-4.1" +export GPT_4_1_MINI_DEPLOYMENT="gpt-4.1-mini" +export TEXT_EMBEDDING_3_LARGE_DEPLOYMENT="text-embedding-3-large" +``` + +**On Windows (PowerShell):** +```powershell +$env:CONTENTUNDERSTANDING_ENDPOINT="https://.services.ai.azure.com/" +$env:CONTENTUNDERSTANDING_KEY="" # Optional if using DefaultAzureCredential +$env:GPT_4_1_DEPLOYMENT="gpt-4.1" +$env:GPT_4_1_MINI_DEPLOYMENT="gpt-4.1-mini" +$env:TEXT_EMBEDDING_3_LARGE_DEPLOYMENT="text-embedding-3-large" +``` + +**On Windows (Command Prompt):** +```bat +set CONTENTUNDERSTANDING_ENDPOINT=https://.services.ai.azure.com/ +set CONTENTUNDERSTANDING_KEY= # Optional if using DefaultAzureCredential +set GPT_4_1_DEPLOYMENT=gpt-4.1 +set GPT_4_1_MINI_DEPLOYMENT=gpt-4.1-mini +set TEXT_EMBEDDING_3_LARGE_DEPLOYMENT=text-embedding-3-large +``` + +**Notes:** +- If `CONTENTUNDERSTANDING_KEY` is not set, the SDK will fall back to `DefaultAzureCredential`. 
Ensure you have authenticated (e.g., `az login`). +- The deployment names must exactly match what you created in Microsoft Foundry in Step 2. + +**3-2. Run the configuration sample** + +To run the configuration sample, you'll need to add the SDK to your project and copy the sample code: + +**Step 1:** Add the SDK dependency to your project's `pom.xml`: + +```xml + + com.azure + azure-ai-contentunderstanding + 1.0.0-beta.1 + + + com.azure + azure-identity + 1.18.2 + +``` + +**Step 2:** Download or copy [Sample00_UpdateDefaults.java][sample00_update_defaults] to your project. + +**Step 3:** Run the sample: + +```bash +# Compile and run (from your project directory) +mvn compile +mvn exec:java -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample00_UpdateDefaults" +``` + +Or run it directly from your IDE by executing the `main` method in `Sample00_UpdateDefaults.java`. + +**Verification** + +After the script runs successfully, you can use prebuilt analyzers like `prebuilt-invoice` or `prebuilt-documentSearch`. For more examples and sample code, see the [Examples](#examples) section. + +If you encounter errors: +- **Deployment Not Found**: Check that deployment names in environment variables match exactly what you created in Foundry. +- **Access Denied**: Ensure you have the **Cognitive Services User** role assignment. + +### Adding the package to your product + +[//]: # ({x-version-update-start;com.azure:azure-ai-contentunderstanding;current}) +```xml + + com.azure + azure-ai-contentunderstanding + 1.0.0-beta.1 + +``` +[//]: # ({x-version-update-end}) + +### Authenticate the client + +In order to interact with the Content Understanding service, you'll need to create an instance of the `ContentUnderstandingClient` class. To authenticate the client, you need your Microsoft Foundry resource endpoint and credentials. You can use either an API key or Microsoft Entra ID authentication. 
+ +#### Using DefaultAzureCredential + +The simplest way to authenticate is using `DefaultAzureCredential`, which supports multiple authentication methods and works well in both local development and production environments: + +```java +// Example: https://your-foundry.services.ai.azure.com/ +String endpoint = ""; +ContentUnderstandingClient client = new ContentUnderstandingClientBuilder() + .endpoint(endpoint) + .credential(new DefaultAzureCredentialBuilder().build()) + .buildClient(); +``` + +#### Using API key + +You can also authenticate using an API key from your Microsoft Foundry resource: + +```java +// Example: https://your-foundry.services.ai.azure.com/ +String endpoint = ""; +String apiKey = ""; +ContentUnderstandingClient client = new ContentUnderstandingClientBuilder() + .endpoint(endpoint) + .credential(new AzureKeyCredential(apiKey)) + .buildClient(); +``` + +> **⚠️ Security Warning**: API key authentication is less secure and is only recommended for testing purposes with test resources. For production, use `DefaultAzureCredential` or other secure authentication methods. + +To get your API key: +1. Go to [Azure Portal][azure_portal] +2. Navigate to your Microsoft Foundry resource +3. Go to **Resource Management** > **Keys and Endpoint** +4. Copy one of the **Keys** (Key1 or Key2) + +For more information on authentication, see [Azure Identity client library for Java][azure_identity]. + + + +## Key concepts + +### Prebuilt analyzers + +Content Understanding provides a rich set of prebuilt analyzers that are ready to use without any configuration. These analyzers are powered by knowledge bases of thousands of real-world document examples, enabling them to understand document structure and adapt to variations in format and content. + +Prebuilt analyzers are organized into several categories: + +* **RAG analyzers** - Optimized for retrieval-augmented generation scenarios with semantic analysis and markdown extraction. 
These analyzers return markdown and a one-paragraph `Summary` for each content item: + * **`prebuilt-documentSearch`** - Extracts content from documents (PDF, images, Office documents) with layout preservation, table detection, figure analysis, and structured markdown output. Optimized for RAG scenarios. + * **`prebuilt-imageSearch`** - Analyzes standalone images and returns a one-paragraph description of the image content. Optimized for image understanding and search scenarios. For images that contain text (including hand-written text), use `prebuilt-documentSearch`. + * **`prebuilt-audioSearch`** - Transcribes audio content with speaker diarization, timing information, and conversation summaries. Supports multilingual transcription. + * **`prebuilt-videoSearch`** - Analyzes video content with visual frame extraction, audio transcription, and structured summaries. Provides temporal alignment of visual and audio content and can return multiple segments per video. +* **Content extraction analyzers** - Focus on OCR and layout analysis (e.g., `prebuilt-read`, `prebuilt-layout`) +* **Base analyzers** - Fundamental content processing capabilities used as parent analyzers for custom analyzers (e.g., `prebuilt-document`, `prebuilt-image`, `prebuilt-audio`, `prebuilt-video`) +* **Domain-specific analyzers** - Preconfigured analyzers for common document categories including financial documents (invoices, receipts, bank statements), identity documents (passports, driver's licenses), tax forms, mortgage documents, and contracts +* **Utility analyzers** - Specialized tools for schema generation and field extraction (e.g., `prebuilt-documentFieldSchema`, `prebuilt-documentFields`) + +For a complete list of available prebuilt analyzers and their capabilities, see the [Prebuilt analyzers documentation][prebuilt_analyzers_docs]. 
+ +### Content types + +The API returns different content types based on the input: + +* **`DocumentContent`** - For document files (PDF, HTML, images, Office documents such as Word, Excel, PowerPoint, and more). Provides basic information such as page count and MIME type. Retrieve detailed information including pages, tables, figures, paragraphs, and many others. +* **`AudioVisualContent`** - For audio and video files. Provides basic information such as timing information (start/end times) and frame dimensions (for video). Retrieve detailed information including transcript phrases, timing information, and for video, key frame references and more. + +### Asynchronous operations + +Content Understanding operations are asynchronous long-running operations. The workflow is: + +1. **Begin Analysis** - Start the analysis operation (returns immediately with an operation location) +2. **Poll for Results** - Poll the operation location until the analysis completes +3. **Process Results** - Extract and display the structured results + +The SDK provides `SyncPoller` and `PollerFlux` types that handle polling automatically. For analysis operations, the SDK returns pollers that provide access to the final `AnalyzeResult`. + +### Main classes + +* **`ContentUnderstandingClient`** - The synchronous client for analyzing content, as well as creating, managing, and configuring analyzers +* **`ContentUnderstandingAsyncClient`** - The asynchronous client with the same capabilities +* **`AnalyzeResult`** - Contains the structured results of an analysis operation, including content elements, markdown, and metadata + +### Thread safety + +We guarantee that all client instance methods are thread-safe and independent of each other. This ensures that the recommendation of reusing client instances is always safe, even across threads. 
+ +### Additional concepts + +The following concepts are common across all Azure SDK client libraries: + +[Client options][azure_core_http_client] | +[Accessing the response][azure_core_response] | +[Long-running operations][azure_core_lro] | +[Handling failures][azure_core_exceptions] | +[Logging][logging] + +## Examples + +You can familiarize yourself with different APIs using [Samples][samples_directory]. + +The samples demonstrate: + +* **Configuration** - Configure model deployment defaults for prebuilt analyzers and custom analyzers +* **Document Content Extraction** - Extract structured markdown content from PDFs and images using `prebuilt-documentSearch`, optimized for RAG (Retrieval-Augmented Generation) applications +* **Multi-Modal Content Analysis** - Analyze content from URLs across all modalities: extract markdown and summaries from documents, images, audio, and video using `prebuilt-documentSearch`, `prebuilt-imageSearch`, `prebuilt-audioSearch`, and `prebuilt-videoSearch` +* **Domain-Specific Analysis** - Extract structured fields from invoices using `prebuilt-invoice` +* **Advanced Document Features** - Extract charts, hyperlinks, formulas, and annotations from documents +* **Custom Analyzers** - Create custom analyzers with field schemas for specialized extraction needs +* **Document Classification** - Create and use classifiers to categorize documents +* **Analyzer Management** - Get, list, update, copy, and delete analyzers +* **Result Management** - Retrieve result files from video analysis and delete analysis results + +See the [samples directory][samples_directory] for complete examples. + +### Running samples + +All samples can be run using Maven's `exec:java` plugin. Before running samples, ensure you have set the required environment variables (see [Step 3: Configure model deployments](#step-3-configure-model-deployments-required-for-prebuilt-analyzers)). 
+ +**Important:** The samples support both API key and `DefaultAzureCredential` authentication. If you set `CONTENTUNDERSTANDING_KEY`, the sample will use API key authentication. If `CONTENTUNDERSTANDING_KEY` is not set, the sample will fall back to `DefaultAzureCredential` (which requires `azure-identity` dependency). + +### Option 1: Run samples in your own project (Recommended) + +The simplest way to run samples is to copy them into your own Maven project: + +1. Add the SDK dependency to your `pom.xml` (see [Adding the package to your product](#adding-the-package-to-your-product)) +2. Add `azure-identity` if using `DefaultAzureCredential`: + ```xml + + com.azure + azure-identity + 1.18.2 + + ``` +3. Copy any sample file from the [samples directory][samples_directory] to your project +4. Run it like any other Java class (e.g., `mvn compile exec:java -Dexec.mainClass="YourSampleClass"` or run from your IDE) + +### Option 2: Run samples from the SDK source repository + +If you want to run samples directly from the SDK source code: + +**Step 1: Clone and compile** + +```bash +# Clone the repository +git clone https://github.com/Azure/azure-sdk-for-java.git +cd azure-sdk-for-java/sdk/contentunderstanding/azure-ai-contentunderstanding + +# Compile the library +mvn compile -DskipTests + +# Compile sample files (samples in src/samples/java are not compiled by default) +mvn dependency:build-classpath -Dmdep.outputFile=target/classpath.txt -q +javac -cp "$(cat target/classpath.txt):target/classes" --release 8 -d target/classes src/samples/java/com/azure/ai/contentunderstanding/samples/*.java +``` + +**Step 2: Run samples** + +Choose one of the following authentication methods: + +**Option A: API key authentication** + +If you have set `CONTENTUNDERSTANDING_KEY`, you can run samples without the test classpath scope: + +```bash +# Set environment variables +export CONTENTUNDERSTANDING_ENDPOINT="https://.services.ai.azure.com/" +export CONTENTUNDERSTANDING_KEY="" + +# Run a 
sample (API key authentication - no test scope needed) +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample02_AnalyzeUrl" \ + -Dexec.cleanupDaemonThreads=false +``` + +**Option B: DefaultAzureCredential authentication** + +If you don't set `CONTENTUNDERSTANDING_KEY`, the sample will use `DefaultAzureCredential`. In this case, you need to include the test classpath scope to access the `azure-identity` dependency: + +```bash +# Set environment variables (no CONTENTUNDERSTANDING_KEY set) +export CONTENTUNDERSTANDING_ENDPOINT="https://.services.ai.azure.com/" +# Ensure you're authenticated (e.g., az login) + +# Run a sample (DefaultAzureCredential - test scope required) +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample02_AnalyzeUrl" \ + -Dexec.classpathScope=test \ + -Dexec.cleanupDaemonThreads=false +``` + +**Common sample commands:** + +```bash +# Analyze document from URL +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample02_AnalyzeUrl" \ + -Dexec.classpathScope=test \ + -Dexec.cleanupDaemonThreads=false + +# Analyze document from binary file +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample01_AnalyzeBinary" \ + -Dexec.classpathScope=test \ + -Dexec.cleanupDaemonThreads=false + +# Analyze invoice +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample03_AnalyzeInvoice" \ + -Dexec.classpathScope=test \ + -Dexec.cleanupDaemonThreads=false + +# Create a custom analyzer +mvn exec:java \ + -Dexec.mainClass="com.azure.ai.contentunderstanding.samples.Sample04_CreateAnalyzer" \ + -Dexec.classpathScope=test \ + -Dexec.cleanupDaemonThreads=false +``` + +**Note:** If you always use API key authentication (always set `CONTENTUNDERSTANDING_KEY`), you can omit `-Dexec.classpathScope=test` from the commands above. 
However, including it doesn't hurt and ensures samples work regardless of which authentication method is used. + +## Troubleshooting + +### Common issues + +**Error: "Access denied due to invalid subscription key or wrong API endpoint"** +- Verify your `endpoint URL` is correct +- Ensure your `API key` is valid or that your Microsoft Entra ID credentials have the correct permissions +- Make sure you have the **Cognitive Services User** role assigned to your account + +**Error: "Model deployment not found" or "Default model deployment not configured"** +- Ensure you have deployed the required models (gpt-4.1, gpt-4.1-mini, text-embedding-3-large) in Microsoft Foundry +- Verify you have configured the default model deployments (see [Configure Model Deployments](#step-3-configure-model-deployments-required-for-prebuilt-analyzers)) +- Check that your deployment names match what you configured in the defaults + +**Error: "Operation failed" or timeout** +- Content Understanding operations are asynchronous and may take time to complete +- Ensure you are properly polling for results using `SyncPoller.waitForCompletion()` or `getFinalResult()` +- Check the operation status for more details about the failure + +### Enable logging + +To enable logging for debugging, configure the HTTP client with logging options: + +```java +ContentUnderstandingClient client = new ContentUnderstandingClientBuilder() + .endpoint(endpoint) + .credential(new DefaultAzureCredentialBuilder().build()) + .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) + .buildClient(); +``` + +For more information, see [Azure SDK for Java logging][logging]. + +## Next steps + +* Explore the [samples directory][samples_directory] for complete code examples +* Read the [Azure AI Content Understanding documentation][product_docs] for detailed service information + +## Contributing + +For details on contributing to this repository, see the [contributing guide][contributing]. + +1. 
Fork it +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Commit your changes (`git commit -am 'Add some feature'`) +1. Push to the branch (`git push origin my-new-feature`) +1. Create new Pull Request + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information see the [Code of Conduct FAQ][code_of_conduct_faq] or contact [opencode@microsoft.com][opencode_email] with any additional questions or comments. + + +[source_code]: https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/contentunderstanding/azure-ai-contentunderstanding +[package_maven]: https://central.sonatype.com/artifact/com.azure/azure-ai-contentunderstanding +[api_reference_docs]: https://azure.github.io/azure-sdk-for-java/ +[product_docs]: https://learn.microsoft.com/azure/ai-services/content-understanding/ +[jdk]: https://learn.microsoft.com/azure/developer/java/fundamentals/ +[azure_subscription]: https://azure.microsoft.com/free/ +[azure_identity]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/identity/azure-identity/README.md +[azure_portal]: https://portal.azure.com/ +[cu_quickstart]: https://learn.microsoft.com/azure/ai-services/content-understanding/quickstart/use-rest-api?tabs=portal%2Cdocument +[cu_region_support]: https://learn.microsoft.com/azure/ai-services/content-understanding/language-region-support +[deploy_models_docs]: https://learn.microsoft.com/azure/ai-studio/how-to/deploy-models-openai +[prebuilt_analyzers_docs]: https://learn.microsoft.com/azure/ai-services/content-understanding/concepts/prebuilt-analyzers +[samples_directory]: https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples +[sample00_update_defaults]: https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaults.java +[logging]: 
https://github.com/Azure/azure-sdk-for-java/wiki/Logging-in-Azure-SDK +[azure_core_http_client]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/README.md#configuring-service-clients +[azure_core_response]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/README.md#accessing-http-response-details-using-responset +[azure_core_lro]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/README.md#long-running-operations-with-pollerfluxt +[azure_core_exceptions]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/README.md#exception-hierarchy-with-azureexception +[contributing]: https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[code_of_conduct_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[opencode_email]: mailto:opencode@microsoft.com diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/assets.json b/sdk/contentunderstanding/azure-ai-contentunderstanding/assets.json new file mode 100644 index 000000000000..95eaa82c10a3 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "java", + "TagPrefix": "java/contentunderstanding/azure-ai-contentunderstanding", + "Tag": "java/contentunderstanding/azure-ai-contentunderstanding_de83e818ce" +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/cspell.json b/sdk/contentunderstanding/azure-ai-contentunderstanding/cspell.json new file mode 100644 index 000000000000..23055b47bc2d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/cspell.json @@ -0,0 +1,18 @@ +{ + "version": "0.2", + "language": "en", + "words": [ + "dexec", + "dmdep", + "javac", + "upca", + "upce", + "UPCA", + "UPCE", + "Dtest", + "Dsurefire", + "dotenv", + "DAZURE", + "pytest" + ] +} diff --git 
a/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/pom.xml b/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/pom.xml new file mode 100644 index 000000000000..51ae021c93cb --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/pom.xml @@ -0,0 +1,21 @@ + + + 4.0.0 + + + com.azure + azure-code-customization-parent + 1.0.0-beta.1 + ../../../parents/azure-code-customization-parent + + + Microsoft Azure Content Understanding client for Java + This package contains client customization for Microsoft Azure Content Understanding + + com.azure.tools + azure-contentunderstanding-customization + 1.0.0-beta.1 + jar + diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/src/main/java/ContentUnderstandingCustomizations.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/src/main/java/ContentUnderstandingCustomizations.java new file mode 100644 index 000000000000..9a041e7c6cfb --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/customization/src/main/java/ContentUnderstandingCustomizations.java @@ -0,0 +1,1361 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +import com.azure.autorest.customization.ClassCustomization; +import com.azure.autorest.customization.Customization; +import com.azure.autorest.customization.LibraryCustomization; +import com.azure.autorest.customization.PackageCustomization; +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.Modifier; +import com.github.javaparser.ast.Node; +import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; +import com.github.javaparser.ast.body.InitializerDeclaration; +import com.github.javaparser.ast.body.MethodDeclaration; +import com.github.javaparser.ast.nodeTypes.NodeWithModifiers; +import com.github.javaparser.javadoc.Javadoc; +import com.github.javaparser.javadoc.description.JavadocDescription; +import org.slf4j.Logger; + +/** + * Customization class for Content Understanding SDK. + * This class contains customization code to modify the AutoRest/TypeSpec generated code. + */ +public class ContentUnderstandingCustomizations extends Customization { + + private static final String PACKAGE_NAME = "com.azure.ai.contentunderstanding"; + private static final String MODELS_PACKAGE = "com.azure.ai.contentunderstanding.models"; + private static final String IMPLEMENTATION_PACKAGE = "com.azure.ai.contentunderstanding.implementation"; + + @Override + public void customize(LibraryCustomization customization, Logger logger) { + // 1. Add operationId field to AnalyzeResult model + customizeAnalyzeResult(customization, logger); + + // 2. Customize PollingUtils to add parseOperationId method + customizePollingUtils(customization, logger); + + // 3. Customize PollingStrategy to extract and set operationId + customizePollingStrategy(customization, logger); + + // 4. Fix generated beginAnalyze/beginAnalyzeBinary bodies to call impl with utf16 (generator no longer emits stringEncoding overloads) + fixGeneratedAnalyzeBodiesToCallImplWithUtf16(customization, logger); + + // 5. 
Add static accessor helper for operationId + addStaticAccessorForOperationId(customization, logger); + + // 6. Add convenience methods to model classes (equivalent to *.Extensions.cs) + customizeContentFieldExtensions(customization, logger); + customizeArrayFieldExtensions(customization, logger); + customizeObjectFieldExtensions(customization, logger); + + // 7. SERVICE-FIX: Add keyFrameTimesMs case-insensitive deserialization + customizeAudioVisualContentDeserialization(customization, logger); + + // 8. Hide methods that expose stringEncoding parameter (if generator still emits them) + hideStringEncodingMethods(customization, logger); + + // 9. Fix generated 2-param beginAnalyzeBinary body if present (generator may use undefined contentType) + fixBeginAnalyzeBinaryTwoParamBody(customization, logger); + + // 10. Make ContentUnderstandingDefaults constructor public for updateDefaults convenience methods + customizeContentUnderstandingDefaults(customization, logger); + + // 11. Add updateDefaults convenience methods (TypeSpec disabled these, but auto-generates updateAnalyzer) + addUpdateDefaultsConvenienceMethods(customization, logger); + + // 12. Add beginAnalyzeBinary convenience overloads (no stringEncoding) + addBeginAnalyzeBinaryConvenienceOverloads(customization, logger); + + // 13. 
Add beginAnalyze convenience overloads (no stringEncoding) + addBeginAnalyzeConvenienceOverloads(customization, logger); + } + + /** + * Add operationId field and getter/setter to ContentAnalyzerAnalyzeOperationStatus + */ + private void customizeAnalyzeResult(LibraryCustomization customization, Logger logger) { + logger.info("Customizing ContentAnalyzerAnalyzeOperationStatus to add operationId field"); + + customization.getClass(MODELS_PACKAGE, "ContentAnalyzerAnalyzeOperationStatus") + .customizeAst(ast -> ast.getClassByName("ContentAnalyzerAnalyzeOperationStatus").ifPresent(clazz -> { + // Remove @Immutable annotation if present + clazz.getAnnotationByName("Immutable").ifPresent(Node::remove); + + // Add operationId field + clazz.addField("String", "operationId", Modifier.Keyword.PRIVATE); + + // Add public getter for operationId + clazz.addMethod("getOperationId", Modifier.Keyword.PUBLIC) + .setType("String") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets the operationId property: The unique ID of the analyze operation. 
" + + "Use this ID with getResultFile() and deleteResult() methods.")) + .addBlockTag("return", "the operationId value.")) + .setBody(StaticJavaParser.parseBlock("{ return operationId; }")); + + // Add private setter for operationId (used by helper) + clazz.addMethod("setOperationId", Modifier.Keyword.PRIVATE) + .setType("void") + .addParameter("String", "operationId") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Sets the operationId property: The unique ID of the analyze operation.")) + .addBlockTag("param", "operationId the operationId value to set.")) + .setBody(StaticJavaParser.parseBlock("{ this.operationId = operationId; }")); + })); + } + + /** + * Add parseOperationId method to PollingUtils + */ + private void customizePollingUtils(LibraryCustomization customization, Logger logger) { + logger.info("Customizing PollingUtils to add parseOperationId method"); + + customization.getClass(IMPLEMENTATION_PACKAGE, "PollingUtils").customizeAst(ast -> { + ast.addImport("java.util.regex.Matcher"); + ast.addImport("java.util.regex.Pattern"); + + ast.getClassByName("PollingUtils").ifPresent(clazz -> { + // Add regex pattern for extracting operationId from Operation-Location header + // Example: https://endpoint/contentunderstanding/analyzers/myAnalyzer/results/operationId?api-version=xxx + clazz.addFieldWithInitializer("Pattern", "OPERATION_ID_PATTERN", + StaticJavaParser.parseExpression("Pattern.compile(\"[^:]+://[^/]+/contentunderstanding/.+/([^?/]+)\")"), + Modifier.Keyword.PRIVATE, Modifier.Keyword.STATIC, Modifier.Keyword.FINAL); + + // Add parseOperationId method + clazz.addMethod("parseOperationId", Modifier.Keyword.STATIC) + .setType("String") + .addParameter("String", "operationLocationHeader") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Parses the operationId from the Operation-Location header.")) + .addBlockTag("param", "operationLocationHeader the Operation-Location header value.") + .addBlockTag("return", "the 
operationId, or null if not found.")) + .setBody(StaticJavaParser.parseBlock("{ " + + "if (CoreUtils.isNullOrEmpty(operationLocationHeader)) { return null; }" + + "Matcher matcher = OPERATION_ID_PATTERN.matcher(operationLocationHeader);" + + "if (matcher.find() && matcher.group(1) != null) { return matcher.group(1); }" + + "return null; }")); + }); + }); + } + + /** + * Customize polling strategies to extract operationId and set it on the result + */ + private void customizePollingStrategy(LibraryCustomization customization, Logger logger) { + logger.info("Customizing SyncOperationLocationPollingStrategy class"); + PackageCustomization packageCustomization = customization.getPackage(IMPLEMENTATION_PACKAGE); + + packageCustomization.getClass("SyncOperationLocationPollingStrategy").customizeAst(ast -> + ast.addImport("com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus") + .addImport("com.azure.ai.contentunderstanding.implementation.ContentAnalyzerAnalyzeOperationStatusHelper") + .getClassByName("SyncOperationLocationPollingStrategy").ifPresent(this::addSyncPollOverrideMethod)); + + logger.info("Customizing OperationLocationPollingStrategy class"); + packageCustomization.getClass("OperationLocationPollingStrategy").customizeAst(ast -> + ast.addImport("com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus") + .addImport("com.azure.ai.contentunderstanding.implementation.ContentAnalyzerAnalyzeOperationStatusHelper") + .getClassByName("OperationLocationPollingStrategy").ifPresent(this::addAsyncPollOverrideMethod)); + } + + private void addSyncPollOverrideMethod(ClassOrInterfaceDeclaration clazz) { + clazz.addMethod("poll", Modifier.Keyword.PUBLIC) + .setType("PollResponse") + .addParameter("PollingContext", "pollingContext") + .addParameter("TypeReference", "pollResponseType") + .addMarkerAnnotation(Override.class) + .setBody(StaticJavaParser.parseBlock("{ " + + "PollResponse pollResponse = super.poll(pollingContext, 
pollResponseType);" + + "String operationLocationHeader = pollingContext.getData(String.valueOf(PollingUtils.OPERATION_LOCATION_HEADER));" + + "String operationId = null;" + + "if (operationLocationHeader != null) {" + + " operationId = PollingUtils.parseOperationId(operationLocationHeader);" + + "}" + + "if (pollResponse.getValue() instanceof ContentAnalyzerAnalyzeOperationStatus) {" + + " ContentAnalyzerAnalyzeOperationStatus operation = (ContentAnalyzerAnalyzeOperationStatus) pollResponse.getValue();" + + " ContentAnalyzerAnalyzeOperationStatusHelper.setOperationId(operation, operationId);" + + "}" + + "return pollResponse; }")); + } + + private void addAsyncPollOverrideMethod(ClassOrInterfaceDeclaration clazz) { + clazz.addMethod("poll", Modifier.Keyword.PUBLIC) + .setType("Mono>") + .addParameter("PollingContext", "pollingContext") + .addParameter("TypeReference", "pollResponseType") + .addMarkerAnnotation(Override.class) + .setBody(StaticJavaParser.parseBlock("{ return super.poll(pollingContext, pollResponseType)" + + ".map(pollResponse -> {" + + " String operationLocationHeader = pollingContext.getData(String.valueOf(PollingUtils.OPERATION_LOCATION_HEADER));" + + " String operationId = null;" + + " if (operationLocationHeader != null) {" + + " operationId = PollingUtils.parseOperationId(operationLocationHeader);" + + " }" + + " if (pollResponse.getValue() instanceof ContentAnalyzerAnalyzeOperationStatus) {" + + " ContentAnalyzerAnalyzeOperationStatus operation = (ContentAnalyzerAnalyzeOperationStatus) pollResponse.getValue();" + + " ContentAnalyzerAnalyzeOperationStatusHelper.setOperationId(operation, operationId);" + + " }" + + " return pollResponse;" + + "}); }")); + } + + /** + * Fix generated 4-param beginAnalyze and 5-param beginAnalyzeBinary bodies to call the impl with utf16. 
+ * After TypeSpec commit 31f87d83 the generator no longer emits 5-param beginAnalyze or 6-param beginAnalyzeBinary; + * the generated 4-param and 5-param methods call those non-existent overloads. This customization rewrites + * their bodies to call serviceClient (impl) directly with stringEncoding "utf16" in RequestOptions. + */ + private void fixGeneratedAnalyzeBodiesToCallImplWithUtf16(LibraryCustomization customization, Logger logger) { + logger.info("Fixing generated beginAnalyze/beginAnalyzeBinary bodies to call impl with utf16"); + + // Sync client: fix 4-param beginAnalyze body + customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> { + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyze".equals(method.getNameAsString()) && method.getParameters().size() == 4) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }" + + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);" + + "AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments);" + + "BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);" + + "return serviceClient.beginAnalyzeWithModel(analyzerId, analyzeRequest1, requestOptions); }")); + break; + } + } + // Fix 5-param beginAnalyzeBinary body + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyzeBinary".equals(method.getNameAsString()) && method.getParameters().size() == 5) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "if 
(inputRange != null) { requestOptions.addQueryParam(\"range\", inputRange, false); }" + + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }" + + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);" + + "return serviceClient.beginAnalyzeBinaryWithModel(analyzerId, contentType, binaryInput, requestOptions); }")); + break; + } + } + }); + }); + + // Async client: fix 4-param beginAnalyze body + customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> { + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyze".equals(method.getNameAsString()) && method.getParameters().size() == 4) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }" + + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);" + + "AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments);" + + "BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);" + + "return serviceClient.beginAnalyzeWithModelAsync(analyzerId, analyzeRequest1, requestOptions); }")); + break; + } + } + // Fix 5-param beginAnalyzeBinary body + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyzeBinary".equals(method.getNameAsString()) && method.getParameters().size() == 5) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "if (inputRange != null) { requestOptions.addQueryParam(\"range\", inputRange, false); }" + + 
"if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }" + + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);" + + "return serviceClient.beginAnalyzeBinaryWithModelAsync(analyzerId, contentType, binaryInput, requestOptions); }")); + break; + } + } + }); + }); + } + + /** + * Customize client methods to: + * 1. Hide methods with stringEncoding parameter (make them package-private) + * 2. Add simplified overloads that use "utf16" as default + */ + /** + * Add simplified beginAnalyze methods that hide the stringEncoding parameter. + * This matches .NET's approach of hiding stringEncoding while keeping processingLocation and modelDeployments. + * NOTE: After generator change (commit 31f87d83) the generator may already emit 4-param and 2-param; this adds them only if not present. + */ + private void addSimplifiedAnalyzeMethods(LibraryCustomization customization, Logger logger) { + logger.info("Adding simplified beginAnalyze methods without stringEncoding parameter"); + + // Add to sync client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.AnalyzeInput"); + ast.addImport("com.azure.ai.contentunderstanding.models.ProcessingLocation"); + ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1"); + ast.addImport("com.azure.core.http.rest.RequestOptions"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.addImport("java.util.List"); + ast.addImport("java.util.Map"); + ast.addImport("com.azure.core.annotation.ServiceMethod"); + ast.addImport("com.azure.core.annotation.ReturnType"); + ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> { + // Add overload with all optional parameters (matches .NET parameter order) + clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC) + .setType("SyncPoller") + .addParameter("String", 
"analyzerId") + .addParameter("List", "inputs") + .addParameter("Map", "modelDeployments") + .addParameter("ProcessingLocation", "processingLocation") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from inputs. " + + "This is a convenience method that uses default string encoding (utf16).")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "inputs The inputs to analyze.") + .addBlockTag("param", "modelDeployments Custom model deployment mappings. Set to null to use service defaults.") + .addBlockTag("param", "processingLocation The processing location for the analysis. Set to null to use the service default.") + .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return beginAnalyze(analyzerId, inputs, modelDeployments, processingLocation, \"utf16\"); }")); + + // Add simplified overload with only analyzerId and inputs (most common usage) + clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC) + .setType("SyncPoller") + .addParameter("String", "analyzerId") + .addParameter("List", "inputs") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from inputs. 
" + + "This is a convenience method that uses default string encoding (utf16), " + + "service default model deployments, and global processing location.")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "inputs The inputs to analyze.") + .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return beginAnalyze(analyzerId, inputs, null, null); }")); + }); + }); + + // Add to async client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.AnalyzeInput"); + ast.addImport("com.azure.ai.contentunderstanding.models.ProcessingLocation"); + ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1"); + ast.addImport("com.azure.core.http.rest.RequestOptions"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.addImport("java.util.List"); + ast.addImport("java.util.Map"); + ast.addImport("com.azure.core.annotation.ServiceMethod"); + ast.addImport("com.azure.core.annotation.ReturnType"); + ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> { + // Add overload with all optional parameters (matches .NET parameter order) + clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC) + .setType("PollerFlux") + .addParameter("String", "analyzerId") + .addParameter("List", "inputs") + .addParameter("Map", "modelDeployments") + .addParameter("ProcessingLocation", "processingLocation") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from inputs. 
" + + "This is a convenience method that uses default string encoding (utf16).")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "inputs The inputs to analyze.") + .addBlockTag("param", "modelDeployments Custom model deployment mappings. Set to null to use service defaults.") + .addBlockTag("param", "processingLocation The processing location for the analysis. Set to null to use the service default.") + .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "return beginAnalyze(analyzerId, inputs, modelDeployments, processingLocation, \"utf16\"); }")); + + // Add simplified overload with only analyzerId and inputs (most common usage) + clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC) + .setType("PollerFlux") + .addParameter("String", "analyzerId") + .addParameter("List", "inputs") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from inputs. 
" + + "This is a convenience method that uses default string encoding (utf16), " + + "service default model deployments, and global processing location.")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "inputs The inputs to analyze.") + .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return beginAnalyze(analyzerId, inputs, null, null); }")); + }); + }); + } + + /** + * Add static accessor helper for setting operationId on ContentAnalyzerAnalyzeOperationStatus + */ + private void addStaticAccessorForOperationId(LibraryCustomization customization, Logger logger) { + logger.info("Adding ContentAnalyzerAnalyzeOperationStatusHelper class"); + + // First, add the static initializer block to ContentAnalyzerAnalyzeOperationStatus + customization.getClass(MODELS_PACKAGE, "ContentAnalyzerAnalyzeOperationStatus").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.implementation.ContentAnalyzerAnalyzeOperationStatusHelper"); + ast.getClassByName("ContentAnalyzerAnalyzeOperationStatus").ifPresent(clazz -> + clazz.getMembers().add(0, new InitializerDeclaration(true, + StaticJavaParser.parseBlock("{" + + "ContentAnalyzerAnalyzeOperationStatusHelper.setAccessor(" + + "new ContentAnalyzerAnalyzeOperationStatusHelper.ContentAnalyzerAnalyzeOperationStatusAccessor() {" + + " @Override" + + " public void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId) {" + + " status.setOperationId(operationId);" + + " }" + + "}); }")))); + }); + + // Create the helper class file + String helperContent = + "// Copyright (c) Microsoft Corporation. 
All rights reserved.\n" + + "// Licensed under the MIT License.\n" + + "package com.azure.ai.contentunderstanding.implementation;\n\n" + + "import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;\n\n" + + "/**\n" + + " * Helper class to access private members of ContentAnalyzerAnalyzeOperationStatus.\n" + + " */\n" + + "public final class ContentAnalyzerAnalyzeOperationStatusHelper {\n" + + " private static ContentAnalyzerAnalyzeOperationStatusAccessor accessor;\n\n" + + " /**\n" + + " * Interface for accessing private members.\n" + + " */\n" + + " public interface ContentAnalyzerAnalyzeOperationStatusAccessor {\n" + + " void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId);\n" + + " }\n\n" + + " /**\n" + + " * Sets the accessor.\n" + + " * @param accessorInstance the accessor instance.\n" + + " */\n" + + " public static void setAccessor(ContentAnalyzerAnalyzeOperationStatusAccessor accessorInstance) {\n" + + " accessor = accessorInstance;\n" + + " }\n\n" + + " /**\n" + + " * Sets the operationId on a ContentAnalyzerAnalyzeOperationStatus instance.\n" + + " * @param status the status instance.\n" + + " * @param operationId the operationId to set.\n" + + " */\n" + + " public static void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId) {\n" + + " accessor.setOperationId(status, operationId);\n" + + " }\n\n" + + " private ContentAnalyzerAnalyzeOperationStatusHelper() {\n" + + " }\n" + + "}\n"; + + customization.getRawEditor().addFile( + "src/main/java/com/azure/ai/contentunderstanding/implementation/ContentAnalyzerAnalyzeOperationStatusHelper.java", + helperContent); + } + + // =================== Extensions equivalent implementations =================== + + /** + * Add getValue() method to ContentField class (equivalent to ContentField.Extensions.cs) + * This allows users to get the typed value regardless of the field subtype. 
+ */ + private void customizeContentFieldExtensions(LibraryCustomization customization, Logger logger) { + logger.info("Adding getValue() method to ContentField class"); + + customization.getClass(MODELS_PACKAGE, "ContentField").customizeAst(ast -> + ast.getClassByName("ContentField").ifPresent(clazz -> { + // Add getValue() method that returns Object based on the actual type + clazz.addMethod("getValue", Modifier.Keyword.PUBLIC) + .setType("Object") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets the value of the field, regardless of its type.\n" + + "Returns the appropriate typed value for each field type:\n" + + "- StringField: returns String (from getValueString())\n" + + "- NumberField: returns Double (from getValueNumber())\n" + + "- IntegerField: returns Long (from getValueInteger())\n" + + "- DateField: returns LocalDate (from getValueDate())\n" + + "- TimeField: returns String (from getValueTime())\n" + + "- BooleanField: returns Boolean (from isValueBoolean())\n" + + "- ObjectField: returns Map (from getValueObject())\n" + + "- ArrayField: returns List (from getValueArray())\n" + + "- JsonField: returns String (from getValueJson())")) + .addBlockTag("return", "the field value, or null if not available.")) + .setBody(StaticJavaParser.parseBlock("{" + + "if (this instanceof StringField) { return ((StringField) this).getValueString(); }" + + "if (this instanceof NumberField) { return ((NumberField) this).getValueNumber(); }" + + "if (this instanceof IntegerField) { return ((IntegerField) this).getValueInteger(); }" + + "if (this instanceof DateField) { return ((DateField) this).getValueDate(); }" + + "if (this instanceof TimeField) { return ((TimeField) this).getValueTime(); }" + + "if (this instanceof BooleanField) { return ((BooleanField) this).isValueBoolean(); }" + + "if (this instanceof ObjectField) { return ((ObjectField) this).getValueObject(); }" + + "if (this instanceof ArrayField) { return ((ArrayField) this).getValueArray(); 
}" + + "if (this instanceof JsonField) { return ((JsonField) this).getValueJson(); }" + + "return null; }")); + })); + } + + /** + * Add convenience methods to ArrayField class (equivalent to ArrayField.Extensions.cs) + */ + private void customizeArrayFieldExtensions(LibraryCustomization customization, Logger logger) { + logger.info("Adding convenience methods to ArrayField class"); + + customization.getClass(MODELS_PACKAGE, "ArrayField").customizeAst(ast -> { + ast.addImport("com.azure.core.util.logging.ClientLogger"); + ast.getClassByName("ArrayField").ifPresent(clazz -> { + // Add static ClientLogger for throwing through Azure SDK lint (ThrowFromClientLoggerCheck) + clazz.addFieldWithInitializer("ClientLogger", "LOGGER", + StaticJavaParser.parseExpression("new ClientLogger(ArrayField.class)"), + Modifier.Keyword.PRIVATE, Modifier.Keyword.STATIC, Modifier.Keyword.FINAL); + + // Add size() method - equivalent to Count property in C# + clazz.addMethod("size", Modifier.Keyword.PUBLIC) + .setType("int") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets the number of items in the array.")) + .addBlockTag("return", "the number of items in the array, or 0 if the array is null.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return getValueArray() != null ? 
getValueArray().size() : 0; }")); + + // Add get(int index) method - equivalent to indexer in C# (throw via ClientLogger per SDK lint) + clazz.addMethod("get", Modifier.Keyword.PUBLIC) + .setType("ContentField") + .addParameter("int", "index") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets a field from the array by index.")) + .addBlockTag("param", "index The zero-based index of the field to retrieve.") + .addBlockTag("return", "The field at the specified index.") + .addBlockTag("throws", "IndexOutOfBoundsException if the index is out of range.")) + .setBody(StaticJavaParser.parseBlock("{" + + "if (getValueArray() == null || index < 0 || index >= getValueArray().size()) {" + + " throw LOGGER.logThrowableAsError(new IndexOutOfBoundsException(\"Index \" + index + \" is out of range. Array has \" + size() + \" elements.\"));" + + "}" + + "return getValueArray().get(index); }")); + }); + }); + } + + /** + * Add convenience methods to ObjectField class (equivalent to ObjectField.Extensions.cs) + */ + private void customizeObjectFieldExtensions(LibraryCustomization customization, Logger logger) { + logger.info("Adding convenience methods to ObjectField class"); + + customization.getClass(MODELS_PACKAGE, "ObjectField").customizeAst(ast -> { + ast.addImport("com.azure.core.util.logging.ClientLogger"); + ast.addImport("java.util.NoSuchElementException"); + ast.getClassByName("ObjectField").ifPresent(clazz -> { + // Add static ClientLogger for throwing through Azure SDK lint (ThrowFromClientLoggerCheck) + clazz.addFieldWithInitializer("ClientLogger", "LOGGER", + StaticJavaParser.parseExpression("new ClientLogger(ObjectField.class)"), + Modifier.Keyword.PRIVATE, Modifier.Keyword.STATIC, Modifier.Keyword.FINAL); + + // Add getField(String fieldName) method - equivalent to indexer in C# (throw via ClientLogger per SDK lint) + clazz.addMethod("getField", Modifier.Keyword.PUBLIC) + .setType("ContentField") + .addParameter("String", "fieldName") + 
.setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets a field from the object by name.")) + .addBlockTag("param", "fieldName The name of the field to retrieve.") + .addBlockTag("return", "The field if found.") + .addBlockTag("throws", "IllegalArgumentException if fieldName is null or empty.") + .addBlockTag("throws", "NoSuchElementException if the field is not found.")) + .setBody(StaticJavaParser.parseBlock("{" + + "if (fieldName == null || fieldName.isEmpty()) {" + + " throw LOGGER.logThrowableAsError(new IllegalArgumentException(\"fieldName cannot be null or empty.\"));" + + "}" + + "if (getValueObject() != null && getValueObject().containsKey(fieldName)) {" + + " return getValueObject().get(fieldName);" + + "}" + + "throw LOGGER.logThrowableAsError(new java.util.NoSuchElementException(\"Field '\" + fieldName + \"' was not found in the object.\")); }")); + + // Add getFieldOrDefault(String fieldName) method - returns null if not found + clazz.addMethod("getFieldOrDefault", Modifier.Keyword.PUBLIC) + .setType("ContentField") + .addParameter("String", "fieldName") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Gets a field from the object by name, or null if the field does not exist.")) + .addBlockTag("param", "fieldName The name of the field to retrieve.") + .addBlockTag("return", "The field if found, or null if not found.")) + .setBody(StaticJavaParser.parseBlock("{" + + "if (fieldName == null || fieldName.isEmpty() || getValueObject() == null) {" + + " return null;" + + "}" + + "return getValueObject().get(fieldName); }")); + }); + }); + } + + // =================== SERVICE-FIX implementations =================== + + /** + * SERVICE-FIX: Customize AudioVisualContent deserialization to handle both "keyFrameTimesMs" + * and "KeyFrameTimesMs" (capital K) property names for forward-compatibility when the service + * fixes the casing issue. 
+ */ + private void customizeAudioVisualContentDeserialization(LibraryCustomization customization, Logger logger) { + logger.info("SERVICE-FIX: Customizing AudioVisualContent to handle keyFrameTimesMs casing"); + + customization.getClass(MODELS_PACKAGE, "AudioVisualContent").customizeAst(ast -> + ast.getClassByName("AudioVisualContent").ifPresent(clazz -> { + // Find the fromJson method and modify the keyFrameTimesMs handling + clazz.getMethodsByName("fromJson").forEach(method -> { + method.getBody().ifPresent(body -> { + String currentBody = body.toString(); + // Replace the exact match for keyFrameTimesMs with case-insensitive handling + // Original: } else if ("keyFrameTimesMs".equals(fieldName)) { + // New: } else if ("keyFrameTimesMs".equals(fieldName) || "KeyFrameTimesMs".equals(fieldName)) { + String updatedBody = currentBody.replace( + "} else if (\"keyFrameTimesMs\".equals(fieldName)) {", + "} else if (\"keyFrameTimesMs\".equals(fieldName) || \"KeyFrameTimesMs\".equals(fieldName)) {" + ); + + // Also wrap the keyFrameTimesMs assignment to prevent overwriting if both casings present + // Original: keyFrameTimesMs = reader.readArray(reader1 -> reader1.getLong()); + // New: if (keyFrameTimesMs == null) { keyFrameTimesMs = reader.readArray(...); } + updatedBody = updatedBody.replace( + "keyFrameTimesMs = reader.readArray(reader1 -> reader1.getLong());", + "if (keyFrameTimesMs == null) { keyFrameTimesMs = reader.readArray(reader1 -> reader1.getLong()); }" + ); + + method.setBody(StaticJavaParser.parseBlock(updatedBody)); + }); + }); + })); + } + + /** + * Add simplified beginAnalyzeBinary methods that don't require contentType parameter. + * When contentType is not specified, defaults to "application/octet-stream". 
+ */ + private void addSimplifiedAnalyzeBinaryMethods(LibraryCustomization customization, Logger logger) { + logger.info("Adding simplified beginAnalyzeBinary methods with default contentType"); + + // NOTE: Generator now produces both beginAnalyzeBinary convenience methods (2-param and 3-param), + // so no customization needed for beginAnalyzeBinary. This method is now a no-op. + } + + /** + * Hide generated methods that expose stringEncoding parameter by making them package-private. + * This prevents stringEncoding from appearing in the public API while still allowing delegation + * from simplified overloads that use utf16 by default. + */ + private void hideStringEncodingMethods(LibraryCustomization customization, Logger logger) { + logger.info("Hiding methods that expose stringEncoding (making package-private)"); + + for (String clientClassName : new String[] { "ContentUnderstandingClient", "ContentUnderstandingAsyncClient" }) { + customization.getClass(PACKAGE_NAME, clientClassName).customizeAst(ast -> + ast.getClassByName(clientClassName).ifPresent(clazz -> { + for (MethodDeclaration method : clazz.getMethods()) { + String name = method.getNameAsString(); + int paramCount = method.getParameters().size(); + + // Hide 1-param beginAnalyze (useless - creates empty AnalyzeRequest1) + if ("beginAnalyze".equals(name) && paramCount == 1) { + method.removeModifier(Modifier.Keyword.PUBLIC); + } + // Hide 2-param beginAnalyze (has stringEncoding parameter) + else if ("beginAnalyze".equals(name) && paramCount == 2) { + method.removeModifier(Modifier.Keyword.PUBLIC); + } + // Hide 5-param beginAnalyze (has stringEncoding parameter) + else if ("beginAnalyze".equals(name) && paramCount == 5) { + method.removeModifier(Modifier.Keyword.PUBLIC); + } + // Remove 3-param beginAnalyzeBinary (stringEncoding) to avoid signature conflict + else if ("beginAnalyzeBinary".equals(name) && paramCount == 3) { + method.remove(); + } + // Hide 6-param beginAnalyzeBinary (has stringEncoding 
parameter) + else if ("beginAnalyzeBinary".equals(name) && paramCount == 6) { + method.removeModifier(Modifier.Keyword.PUBLIC); + } + } + })); + } + } + + /** + * Add public beginAnalyzeBinary(analyzerId, binaryInput, inputRange, contentType, processingLocation) overload + * that delegates to the 6-param method with stringEncoding "utf16". The 6-param method is hidden by + * hideStringEncodingMethods. + */ + private void addBeginAnalyzeBinaryFiveParamOverload(LibraryCustomization customization, Logger logger) { + logger.info("Adding 5-param beginAnalyzeBinary overload with default stringEncoding utf16"); + + // Sync client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.ProcessingLocation"); + ast.addImport("com.azure.core.annotation.ServiceMethod"); + ast.addImport("com.azure.core.annotation.ReturnType"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> { + clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC) + .setType("SyncPoller") + .addParameter("String", "analyzerId") + .addParameter("BinaryData", "binaryInput") + .addParameter("String", "inputRange") + .addParameter("String", "contentType") + .addParameter("ProcessingLocation", "processingLocation") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from binary input. Uses default string encoding (utf16).")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "binaryInput The binary content of the document to analyze.") + .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). 
Document content uses 1-based page numbers; audio visual uses milliseconds.") + .addBlockTag("param", "contentType Request content type.") + .addBlockTag("param", "processingLocation The location where the data may be processed. Set to null for service default.") + .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, contentType, processingLocation, \"utf16\"); }")); + }); + }); + + // Async client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.ProcessingLocation"); + ast.addImport("com.azure.core.annotation.ServiceMethod"); + ast.addImport("com.azure.core.annotation.ReturnType"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> { + clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC) + .setType("PollerFlux") + .addParameter("String", "analyzerId") + .addParameter("BinaryData", "binaryInput") + .addParameter("String", "inputRange") + .addParameter("String", "contentType") + .addParameter("ProcessingLocation", "processingLocation") + .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)")) + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Extract content and fields from binary input. Uses default string encoding (utf16).")) + .addBlockTag("param", "analyzerId The unique identifier of the analyzer.") + .addBlockTag("param", "binaryInput The binary content of the document to analyze.") + .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). 
Document content uses 1-based page numbers; audio visual uses milliseconds.") + .addBlockTag("param", "contentType Request content type.") + .addBlockTag("param", "processingLocation The location where the data may be processed. Set to null for service default.") + .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, contentType, processingLocation, \"utf16\"); }")); + }); + }); + } + + /** + * Fix generated 2-param beginAnalyzeBinary(analyzerId, binaryInput) body. + * The generator emits this overload but the body uses undefined variable contentType; + * replace with "application/octet-stream". + */ + private void fixBeginAnalyzeBinaryTwoParamBody(LibraryCustomization customization, Logger logger) { + logger.info("Fixing 2-param beginAnalyzeBinary body to use application/octet-stream"); + + // Sync client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> + ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> { + boolean found = false; + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyzeBinary".equals(method.getNameAsString()) + && method.getParameters().size() == 2) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "return serviceClient.beginAnalyzeBinaryWithModel(analyzerId, \"application/octet-stream\", binaryInput, requestOptions); }")); + found = true; + break; + } + } + logger.info("Sync beginAnalyzeBinary 2-param found for body fix: {}", found); + })); + + // Async client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> + 
ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> { + boolean found = false; + for (MethodDeclaration method : clazz.getMethods()) { + if ("beginAnalyzeBinary".equals(method.getNameAsString()) + && method.getParameters().size() == 2) { + method.setBody(StaticJavaParser.parseBlock("{" + + "RequestOptions requestOptions = new RequestOptions();" + + "return serviceClient.beginAnalyzeBinaryWithModelAsync(analyzerId, \"application/octet-stream\", binaryInput, requestOptions); }")); + found = true; + break; + } + } + logger.info("Async beginAnalyzeBinary 2-param found for body fix: {}", found); + })); + } + + /** + * SERVICE-FIX: Fix SupportedModels to use List instead of Map. + * The service returns arrays for completion/embedding fields, not maps. + * This fixes the deserialization error: "Unexpected token to begin map deserialization: START_ARRAY" + */ + private void customizeSupportedModels(LibraryCustomization customization, Logger logger) { + logger.info("Customizing SupportedModels to use List instead of Map"); + + customization.getClass(MODELS_PACKAGE, "SupportedModels").customizeAst(ast -> { + ast.addImport("java.util.List"); + ast.addImport("java.util.ArrayList"); + + ast.getClassByName("SupportedModels").ifPresent(clazz -> { + // Change completion field from Map to List + clazz.getFieldByName("completion").ifPresent(field -> { + field.getVariable(0).setType("List"); + }); + + // Change embedding field from Map to List + clazz.getFieldByName("embedding").ifPresent(field -> { + field.getVariable(0).setType("List"); + }); + + // Update getCompletion return type + clazz.getMethodsByName("getCompletion").forEach(method -> { + method.setType("List"); + }); + + // Update getEmbedding return type + clazz.getMethodsByName("getEmbedding").forEach(method -> { + method.setType("List"); + }); + + // Update constructor parameter types + clazz.getConstructors().forEach(constructor -> { + constructor.getParameters().forEach(param -> { + String paramName 
= param.getNameAsString(); + if ("completion".equals(paramName) || "embedding".equals(paramName)) { + param.setType("List"); + } + }); + }); + + // Update toJson method - change writeMapField to writeArrayField + clazz.getMethodsByName("toJson").forEach(method -> { + method.getBody().ifPresent(body -> { + String bodyStr = body.toString(); + // Replace writeMapField with writeArrayField for completion and embedding + bodyStr = bodyStr.replace( + "jsonWriter.writeMapField(\"completion\", this.completion, (writer, element) -> writer.writeString(element))", + "jsonWriter.writeArrayField(\"completion\", this.completion, (writer, element) -> writer.writeString(element))"); + bodyStr = bodyStr.replace( + "jsonWriter.writeMapField(\"embedding\", this.embedding, (writer, element) -> writer.writeString(element))", + "jsonWriter.writeArrayField(\"embedding\", this.embedding, (writer, element) -> writer.writeString(element))"); + method.setBody(StaticJavaParser.parseBlock(bodyStr)); + }); + }); + + // Update fromJson method - change readMap to readArray + clazz.getMethodsByName("fromJson").forEach(method -> { + method.getBody().ifPresent(body -> { + String bodyStr = body.toString(); + // Replace Map with List + bodyStr = bodyStr.replace("Map completion = null;", "List completion = null;"); + bodyStr = bodyStr.replace("Map embedding = null;", "List embedding = null;"); + // Replace readMap with readArray + bodyStr = bodyStr.replace( + "completion = reader.readMap(reader1 -> reader1.getString());", + "completion = reader.readArray(reader1 -> reader1.getString());"); + bodyStr = bodyStr.replace( + "embedding = reader.readMap(reader1 -> reader1.getString());", + "embedding = reader.readArray(reader1 -> reader1.getString());"); + method.setBody(StaticJavaParser.parseBlock(bodyStr)); + }); + }); + }); + }); + } + + /** + * SERVICE-FIX: Fix the copyAnalyzer API path and expected responses. 
+     *
+     * The TypeSpec/Swagger spec incorrectly uses ":copyAnalyzer" as the action,
+     * but the actual service endpoint uses ":copy". Additionally, the spec only
+     * expects 202 response, but the service can return 200, 201, or 202.
+     *
+     * This customization modifies the ContentUnderstandingService interface annotations
+     * to match the actual service behavior.
+     */
+    private void customizeCopyAnalyzerApi(LibraryCustomization customization, Logger logger) {
+        logger.info("SERVICE-FIX: Customizing copyAnalyzer API path and expected responses");
+
+        customization.getClass(IMPLEMENTATION_PACKAGE, "ContentUnderstandingClientImpl").customizeAst(ast -> {
+            ast.addImport("com.azure.core.exception.ResourceNotFoundException");
+
+            // Find the ContentUnderstandingService interface inside ContentUnderstandingClientImpl
+            ast.getClassByName("ContentUnderstandingClientImpl").ifPresent(implClass -> {
+                implClass.getMembers().stream()
+                    .filter(member -> member instanceof ClassOrInterfaceDeclaration)
+                    .map(member -> (ClassOrInterfaceDeclaration) member)
+                    .filter(innerClass -> innerClass.getNameAsString().equals("ContentUnderstandingService"))
+                    .findFirst()
+                    .ifPresent(serviceInterface -> {
+                        // Find and update copyAnalyzer method
+                        serviceInterface.getMethodsByName("copyAnalyzer").forEach(method -> {
+                            // Update @Post annotation from ":copyAnalyzer" to ":copy"
+                            method.getAnnotationByName("Post").ifPresent(postAnnotation -> {
+                                // FIX: check the annotation form before casting. asNormalAnnotationExpr()
+                                // throws IllegalStateException on the single-member form @Post("...")
+                                // that the generator emits, which previously aborted this customization
+                                // before the single-member branch below could run.
+                                if (postAnnotation.isNormalAnnotationExpr()) {
+                                    postAnnotation.asNormalAnnotationExpr().getPairs().forEach(pair -> {
+                                        if (pair.getValue().toString().contains(":copyAnalyzer")) {
+                                            pair.setValue(StaticJavaParser.parseExpression(
+                                                "\"/analyzers/{analyzerId}:copy\""));
+                                            logger.info("Updated @Post path for copyAnalyzer async method");
+                                        }
+                                    });
+                                }
+                                // Handle single value annotation
+                                if (postAnnotation.isSingleMemberAnnotationExpr()) {
+                                    String value = postAnnotation.asSingleMemberAnnotationExpr()
+                                        .getMemberValue().toString();
+                                    if (value.contains(":copyAnalyzer")) {
+                                        postAnnotation.asSingleMemberAnnotationExpr().setMemberValue(
+                                            StaticJavaParser.parseExpression("\"/analyzers/{analyzerId}:copy\""));
+                                        logger.info("Updated @Post path for copyAnalyzer async method (single value)");
+                                    }
+                                }
+                            });
+
+                            // Update @ExpectedResponses from { 202 } to { 200, 201, 202 }
+                            method.getAnnotationByName("ExpectedResponses").ifPresent(expectedAnnotation -> {
+                                if (expectedAnnotation.isSingleMemberAnnotationExpr()) {
+                                    expectedAnnotation.asSingleMemberAnnotationExpr().setMemberValue(
+                                        StaticJavaParser.parseExpression("{ 200, 201, 202 }"));
+                                    logger.info("Updated @ExpectedResponses for copyAnalyzer async method");
+                                }
+                            });
+
+                            // Add @UnexpectedResponseExceptionType for 404 if not present
+                            boolean has404Handler = method.getAnnotations().stream()
+                                .filter(a -> a.getNameAsString().equals("UnexpectedResponseExceptionType"))
+                                .anyMatch(a -> a.toString().contains("404"));
+
+                            if (!has404Handler) {
+                                // Add 404 handler annotation
+                                method.addAnnotation(StaticJavaParser.parseAnnotation(
+                                    "@UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 })"));
+                                logger.info("Added 404 exception handler for copyAnalyzer async method");
+                            }
+                        });
+
+                        // Find and update copyAnalyzerSync method
+                        serviceInterface.getMethodsByName("copyAnalyzerSync").forEach(method -> {
+                            // Update @Post annotation from ":copyAnalyzer" to ":copy"
+                            method.getAnnotationByName("Post").ifPresent(postAnnotation -> {
+                                // FIX: same type guard as the async variant in this method.
+                                if (postAnnotation.isNormalAnnotationExpr()) {
+                                    postAnnotation.asNormalAnnotationExpr().getPairs().forEach(pair -> {
+                                        if (pair.getValue().toString().contains(":copyAnalyzer")) {
+                                            pair.setValue(StaticJavaParser.parseExpression(
+                                                "\"/analyzers/{analyzerId}:copy\""));
+                                            logger.info("Updated @Post path for copyAnalyzerSync method");
+                                        }
+                                    });
+                                }
+                                // Handle single value annotation
+                                if (postAnnotation.isSingleMemberAnnotationExpr()) {
+                                    String value = postAnnotation.asSingleMemberAnnotationExpr()
+                                        .getMemberValue().toString();
+                                    if (value.contains(":copyAnalyzer")) {
postAnnotation.asSingleMemberAnnotationExpr().setMemberValue( + StaticJavaParser.parseExpression("\"/analyzers/{analyzerId}:copy\"")); + logger.info("Updated @Post path for copyAnalyzerSync method (single value)"); + } + } + }); + + // Update @ExpectedResponses from { 202 } to { 200, 201, 202 } + method.getAnnotationByName("ExpectedResponses").ifPresent(expectedAnnotation -> { + if (expectedAnnotation.isSingleMemberAnnotationExpr()) { + expectedAnnotation.asSingleMemberAnnotationExpr().setMemberValue( + StaticJavaParser.parseExpression("{ 200, 201, 202 }")); + logger.info("Updated @ExpectedResponses for copyAnalyzerSync method"); + } + }); + + // Add @UnexpectedResponseExceptionType for 404 if not present + boolean has404Handler = method.getAnnotations().stream() + .filter(a -> a.getNameAsString().equals("UnexpectedResponseExceptionType")) + .anyMatch(a -> a.toString().contains("404")); + + if (!has404Handler) { + // Add 404 handler annotation + method.addAnnotation(StaticJavaParser.parseAnnotation( + "@UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 })")); + logger.info("Added 404 exception handler for copyAnalyzerSync method"); + } + }); + }); + }); + }); + } + + // =================== Update Convenience Methods =================== + + /** + * Make ContentUnderstandingDefaults constructor public to allow creating instances + * for the updateDefaults convenience method. 
+ */ + private void customizeContentUnderstandingDefaults(LibraryCustomization customization, Logger logger) { + logger.info("Customizing ContentUnderstandingDefaults to make constructor public and remove @Immutable"); + + customization.getClass(MODELS_PACKAGE, "ContentUnderstandingDefaults").customizeAst(ast -> { + // Remove @Immutable annotation + ast.getClassByName("ContentUnderstandingDefaults").ifPresent(clazz -> { + clazz.getAnnotationByName("Immutable").ifPresent(Node::remove); + + // Find the existing constructor and make it public + clazz.getConstructors().forEach(constructor -> { + constructor.removeModifier(Modifier.Keyword.PRIVATE); + constructor.addModifier(Modifier.Keyword.PUBLIC); + + // Update Javadoc + constructor.setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Creates an instance of ContentUnderstandingDefaults class.")) + .addBlockTag("param", "modelDeployments Mapping of model names to deployments. " + + "For example: { \"gpt-4.1\": \"myGpt41Deployment\", \"text-embedding-3-large\": \"myTextEmbedding3LargeDeployment\" }.")); + }); + }); + }); + } + + /** + * Add convenience methods for updateDefaults that accept typed objects + * instead of BinaryData. This is equivalent to C# Update Operations in ContentUnderstandingClient.Customizations.cs + * + * Note: TypeSpec auto-generates updateAnalyzer convenience methods, so we only add updateDefaults here. + * The updateDefaults convenience methods were disabled in TypeSpec because they require a public constructor + * on ContentUnderstandingDefaults, which we enable via customizeContentUnderstandingDefaults. 
+ */ + private void addUpdateDefaultsConvenienceMethods(LibraryCustomization customization, Logger logger) { + logger.info("Adding updateDefaults convenience methods"); + + // Add to sync client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults"); + ast.addImport("com.azure.core.util.BinaryData"); + ast.addImport("java.util.Map"); + + ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> { + // Add updateDefaults convenience method with Map parameter - returns ContentUnderstandingDefaults directly + clazz.addMethod("updateDefaults", Modifier.Keyword.PUBLIC) + .setType("ContentUnderstandingDefaults") + .addParameter("Map", "modelDeployments") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Update default model deployment settings.\n\n" + + "This is the recommended public API for updating default model deployment settings. " + + "This method provides a simpler API that accepts a Map of model names to deployment names.")) + .addBlockTag("param", "modelDeployments Mapping of model names to deployment names. 
" + + "For example: { \"gpt-4.1\": \"myGpt41Deployment\", \"text-embedding-3-large\": \"myTextEmbedding3LargeDeployment\" }.") + .addBlockTag("return", "the updated ContentUnderstandingDefaults.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "ContentUnderstandingDefaults defaults = new ContentUnderstandingDefaults(modelDeployments);" + + "Response response = updateDefaultsWithResponse(BinaryData.fromObject(defaults), null);" + + "return response.getValue().toObject(ContentUnderstandingDefaults.class); }")); + + // Add updateDefaults convenience method with ContentUnderstandingDefaults parameter + clazz.addMethod("updateDefaults", Modifier.Keyword.PUBLIC) + .setType("ContentUnderstandingDefaults") + .addParameter("ContentUnderstandingDefaults", "defaults") + .setJavadocComment(new Javadoc(JavadocDescription.parseText( + "Update default model deployment settings.\n\n" + + "This is a convenience method that accepts a ContentUnderstandingDefaults object.")) + .addBlockTag("param", "defaults The ContentUnderstandingDefaults instance with settings to update.") + .addBlockTag("return", "the updated ContentUnderstandingDefaults.") + .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server.")) + .setBody(StaticJavaParser.parseBlock("{" + + "Response response = updateDefaultsWithResponse(BinaryData.fromObject(defaults), null);" + + "return response.getValue().toObject(ContentUnderstandingDefaults.class); }")); + }); + }); + + // Add to async client + customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> { + ast.addImport("com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults"); + 
        // (Tail of the updateDefaults async-client customization; the enclosing method begins above this view.)
        // Imports required by the generated convenience-method bodies below.
        ast.addImport("com.azure.core.util.BinaryData");
        ast.addImport("java.util.Map");

        ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> {
            // Add updateDefaults convenience method with Map parameter - returns Mono
            // NOTE(review): type strings here appear raw ("Mono", "Map") — angle-bracketed generic
            // arguments may have been lost in this view; confirm the emitted signatures carry full
            // generics (e.g. Mono<ContentUnderstandingDefaults>, Map<String, String>).
            clazz.addMethod("updateDefaults", Modifier.Keyword.PUBLIC)
                .setType("Mono")
                .addParameter("Map", "modelDeployments")
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Update default model deployment settings.\n\n"
                        + "This is the recommended public API for updating default model deployment settings. "
                        + "This method provides a simpler API that accepts a Map of model names to deployment names."))
                            .addBlockTag("param", "modelDeployments Mapping of model names to deployment names. "
                                + "For example: { \"gpt-4.1\": \"myGpt41Deployment\", \"text-embedding-3-large\": \"myTextEmbedding3LargeDeployment\" }.")
                            .addBlockTag("return", "the updated ContentUnderstandingDefaults on successful completion of {@link Mono}.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                // Body delegates to the protocol method and deserializes the response payload.
                .setBody(StaticJavaParser.parseBlock("{"
                    + "ContentUnderstandingDefaults defaults = new ContentUnderstandingDefaults(modelDeployments);"
                    + "return updateDefaultsWithResponse(BinaryData.fromObject(defaults), null)"
                    + ".map(response -> response.getValue().toObject(ContentUnderstandingDefaults.class)); }"));

            // Add updateDefaults convenience method with ContentUnderstandingDefaults parameter
            clazz.addMethod("updateDefaults", Modifier.Keyword.PUBLIC)
                .setType("Mono")
                .addParameter("ContentUnderstandingDefaults", "defaults")
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Update default model deployment settings.\n\n"
                        + "This is a convenience method that accepts a ContentUnderstandingDefaults object."))
                            .addBlockTag("param", "defaults The ContentUnderstandingDefaults instance with settings to update.")
                            .addBlockTag("return", "the updated ContentUnderstandingDefaults on successful completion of {@link Mono}.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return updateDefaultsWithResponse(BinaryData.fromObject(defaults), null)"
                    + ".map(response -> response.getValue().toObject(ContentUnderstandingDefaults.class)); }"));
        });
    });
}

/**
 * Add beginAnalyzeBinary convenience overloads without stringEncoding.
 * Adds 2-param, 3-param, and 5-param overloads that default utf16.
 *
 * <p>The 2- and 3-param overloads delegate to the 5-param overload with content type
 * "application/octet-stream" and a null processing location; only the 5-param overload
 * builds RequestOptions and calls the implementation client. The generated bodies always
 * set the "stringEncoding" query parameter to "utf16".</p>
 *
 * @param customization the library customization handle for the generated package.
 * @param logger the logger used to report customization progress.
 */
private void addBeginAnalyzeBinaryConvenienceOverloads(LibraryCustomization customization, Logger logger) {
    logger.info("Adding beginAnalyzeBinary convenience overloads (2/3/5 param)");

    // Sync client
    customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> {
        ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> {
            // 2-param: analyzerId, binaryInput
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("SyncPoller")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default content type (application/octet-stream), "
                        + "default string encoding (utf16), and service default processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyzeBinary(analyzerId, binaryInput, null, \"application/octet-stream\", null); }"));

            // 3-param: analyzerId, binaryInput, inputRange
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("SyncPoller")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addParameter("String", "inputRange")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default content type (application/octet-stream), "
                        + "default string encoding (utf16), and service default processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). "
                                + "Document content uses 1-based page numbers; audio visual uses milliseconds.")
                            .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, \"application/octet-stream\", null); }"));

            // 5-param: analyzerId, binaryInput, inputRange, contentType, processingLocation
            // NOTE(review): contentType is forwarded to the implementation client as-is — confirm the
            // generated protocol method tolerates a null contentType if callers pass one directly.
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("SyncPoller")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addParameter("String", "inputRange")
                .addParameter("String", "contentType")
                .addParameter("ProcessingLocation", "processingLocation")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default string encoding (utf16)."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio visual uses milliseconds.")
                            .addBlockTag("param", "contentType Request content type.")
                            .addBlockTag("param", "processingLocation The location where the data may be processed. Set to null for service default.")
                            .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "RequestOptions requestOptions = new RequestOptions();"
                    + "if (inputRange != null) { requestOptions.addQueryParam(\"range\", inputRange, false); }"
                    + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }"
                    + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);"
                    + "return serviceClient.beginAnalyzeBinaryWithModel(analyzerId, contentType, binaryInput, requestOptions); }"));
        });
    });

    // Async client — mirrors the sync overloads with PollerFlux return types and *Async delegates.
    customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> {
        ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> {
            // 2-param: analyzerId, binaryInput
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("PollerFlux")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default content type (application/octet-stream), "
                        + "default string encoding (utf16), and service default processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyzeBinary(analyzerId, binaryInput, null, \"application/octet-stream\", null); }"));

            // 3-param: analyzerId, binaryInput, inputRange
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("PollerFlux")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addParameter("String", "inputRange")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default content type (application/octet-stream), "
                        + "default string encoding (utf16), and service default processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). "
                                + "Document content uses 1-based page numbers; audio visual uses milliseconds.")
                            .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, \"application/octet-stream\", null); }"));

            // 5-param: analyzerId, binaryInput, inputRange, contentType, processingLocation
            clazz.addMethod("beginAnalyzeBinary", Modifier.Keyword.PUBLIC)
                .setType("PollerFlux")
                .addParameter("String", "analyzerId")
                .addParameter("BinaryData", "binaryInput")
                .addParameter("String", "inputRange")
                .addParameter("String", "contentType")
                .addParameter("ProcessingLocation", "processingLocation")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from binary input. Uses default string encoding (utf16)."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "binaryInput The binary content of the document to analyze.")
                            .addBlockTag("param", "inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio visual uses milliseconds.")
                            .addBlockTag("param", "contentType Request content type.")
                            .addBlockTag("param", "processingLocation The location where the data may be processed. Set to null for service default.")
                            .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "RequestOptions requestOptions = new RequestOptions();"
                    + "if (inputRange != null) { requestOptions.addQueryParam(\"range\", inputRange, false); }"
                    + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }"
                    + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);"
                    + "return serviceClient.beginAnalyzeBinaryWithModelAsync(analyzerId, contentType, binaryInput, requestOptions); }"));
        });
    });
}

/**
 * Add beginAnalyze convenience overloads without stringEncoding.
 * Adds 2-param and 4-param overloads that default utf16.
 *
 * <p>The 2-param overload delegates to the 4-param overload with null model deployments and
 * null processing location; the 4-param overload builds the AnalyzeRequest1 payload and calls
 * the implementation client directly.</p>
 *
 * @param customization the library customization handle for the generated package.
 * @param logger the logger used to report customization progress.
 */
private void addBeginAnalyzeConvenienceOverloads(LibraryCustomization customization, Logger logger) {
    logger.info("Adding beginAnalyze convenience overloads (2/4 param)");

    // Sync client
    customization.getClass(PACKAGE_NAME, "ContentUnderstandingClient").customizeAst(ast -> {
        ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1");
        ast.addImport("com.azure.core.util.BinaryData");
        ast.getClassByName("ContentUnderstandingClient").ifPresent(clazz -> {
            // 2-param: analyzerId, inputs
            clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC)
                .setType("SyncPoller")
                .addParameter("String", "analyzerId")
                .addParameter("List", "inputs")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from inputs. Uses default string encoding (utf16), "
                        + "service default model deployments, and global processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "inputs The inputs to analyze.")
                            .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyze(analyzerId, inputs, null, null); }"));

            // 4-param: analyzerId, inputs, modelDeployments, processingLocation
            clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC)
                .setType("SyncPoller")
                .addParameter("String", "analyzerId")
                .addParameter("List", "inputs")
                .addParameter("Map", "modelDeployments")
                .addParameter("ProcessingLocation", "processingLocation")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from inputs. Uses default string encoding (utf16)."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "inputs The inputs to analyze.")
                            .addBlockTag("param", "modelDeployments Custom model deployment mappings. Set to null to use service defaults.")
                            .addBlockTag("param", "processingLocation The processing location for the analysis. Set to null to use the service default.")
                            .addBlockTag("return", "the {@link SyncPoller} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "RequestOptions requestOptions = new RequestOptions();"
                    + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }"
                    + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);"
                    + "AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments);"
                    + "BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);"
                    + "return serviceClient.beginAnalyzeWithModel(analyzerId, analyzeRequest1, requestOptions); }"));
        });
    });

    // Async client — mirrors the sync overloads with PollerFlux return types and *Async delegates.
    customization.getClass(PACKAGE_NAME, "ContentUnderstandingAsyncClient").customizeAst(ast -> {
        ast.addImport("com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1");
        ast.addImport("com.azure.core.util.BinaryData");
        ast.getClassByName("ContentUnderstandingAsyncClient").ifPresent(clazz -> {
            // 2-param: analyzerId, inputs
            clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC)
                .setType("PollerFlux")
                .addParameter("String", "analyzerId")
                .addParameter("List", "inputs")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from inputs. Uses default string encoding (utf16), "
                        + "service default model deployments, and global processing location."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "inputs The inputs to analyze.")
                            .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "return beginAnalyze(analyzerId, inputs, null, null); }"));

            // 4-param: analyzerId, inputs, modelDeployments, processingLocation
            clazz.addMethod("beginAnalyze", Modifier.Keyword.PUBLIC)
                .setType("PollerFlux")
                .addParameter("String", "analyzerId")
                .addParameter("List", "inputs")
                .addParameter("Map", "modelDeployments")
                .addParameter("ProcessingLocation", "processingLocation")
                .addAnnotation(StaticJavaParser.parseAnnotation("@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)"))
                .setJavadocComment(new Javadoc(JavadocDescription.parseText(
                    "Extract content and fields from inputs. Uses default string encoding (utf16)."))
                            .addBlockTag("param", "analyzerId The unique identifier of the analyzer.")
                            .addBlockTag("param", "inputs The inputs to analyze.")
                            .addBlockTag("param", "modelDeployments Custom model deployment mappings. Set to null to use service defaults.")
                            .addBlockTag("param", "processingLocation The processing location for the analysis. Set to null to use the service default.")
                            .addBlockTag("return", "the {@link PollerFlux} for polling of the analyze operation.")
                            .addBlockTag("throws", "IllegalArgumentException thrown if parameters fail the validation.")
                            .addBlockTag("throws", "HttpResponseException thrown if the request is rejected by server."))
                .setBody(StaticJavaParser.parseBlock("{"
                    + "RequestOptions requestOptions = new RequestOptions();"
                    + "if (processingLocation != null) { requestOptions.addQueryParam(\"processingLocation\", processingLocation.toString(), false); }"
                    + "requestOptions.addQueryParam(\"stringEncoding\", \"utf16\", false);"
                    + "AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments);"
                    + "BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);"
                    + "return serviceClient.beginAnalyzeWithModelAsync(analyzerId, analyzeRequest1, requestOptions); }"));
        });
    });
}
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/pom.xml b/sdk/contentunderstanding/azure-ai-contentunderstanding/pom.xml new file mode 100644 index 000000000000..5e777ea6045e --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/pom.xml @@ -0,0 +1,97 @@ + + 4.0.0 + + com.azure + azure-client-sdk-parent + 1.7.0 + ../../parents/azure-client-sdk-parent + + + com.azure + azure-ai-contentunderstanding + 1.0.0-beta.1 + jar + + Microsoft Azure SDK for ContentUnderstanding + This package contains Microsoft Azure ContentUnderstanding client library.
+ https://github.com/Azure/azure-sdk-for-java + + + + The MIT License (MIT) + http://opensource.org/licenses/MIT + repo + + + + + https://github.com/Azure/azure-sdk-for-java + scm:git:git@github.com:Azure/azure-sdk-for-java.git + scm:git:git@github.com:Azure/azure-sdk-for-java.git + HEAD + + + + microsoft + Microsoft + + + + UTF-8 + + 0 + 0 + + **/generated/Sample*.java,**/samples/Sample*.java + + + + com.azure + azure-core + 1.57.1 + + + com.azure + azure-core-http-netty + 1.16.3 + + + com.azure + azure-core-test + 1.27.0-beta.14 + test + + + com.azure + azure-identity + 1.18.2 + test + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.5.3 + + + + **/*Test.java + **/*Tests.java + + **/generated/Sample*.java + + **/tests/samples/Sample*.java + + + + + + diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingAsyncClient.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingAsyncClient.java new file mode 100644 index 000000000000..c78ed8aeaa9b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingAsyncClient.java @@ -0,0 +1,2106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
package com.azure.ai.contentunderstanding;

import com.azure.ai.contentunderstanding.implementation.ContentUnderstandingClientImpl;
import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper;
import com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1;
import com.azure.ai.contentunderstanding.implementation.models.CopyAnalyzerRequest;
import com.azure.ai.contentunderstanding.implementation.models.GrantCopyAuthorizationRequest1;
import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus;
import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults;
import com.azure.ai.contentunderstanding.models.CopyAuthorization;
import com.azure.ai.contentunderstanding.models.ProcessingLocation;
import com.azure.core.annotation.Generated;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceClient;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.exception.ClientAuthenticationException;
import com.azure.core.exception.HttpResponseException;
import com.azure.core.exception.ResourceModifiedException;
import com.azure.core.exception.ResourceNotFoundException;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedResponse;
import com.azure.core.http.rest.PagedResponseBase;
import com.azure.core.http.rest.RequestOptions;
import com.azure.core.http.rest.Response;
import com.azure.core.util.BinaryData;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.polling.PollerFlux;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// NOTE(review): this file is @Generated (TypeSpec Code Generator) — do not hand-edit logic;
// behavioral changes belong in the TypeSpec or the customization class. The generic type
// arguments below (PollerFlux<BinaryData, BinaryData>) were reconstructed from the generator's
// standard protocol-method shape — confirm against freshly regenerated output.

/**
 * Initializes a new instance of the asynchronous ContentUnderstandingClient type.
 */
@ServiceClient(builder = ContentUnderstandingClientBuilder.class, isAsync = true)
public final class ContentUnderstandingAsyncClient {

    // Implementation client that performs the actual service calls; all public methods delegate to it.
    @Generated
    private final ContentUnderstandingClientImpl serviceClient;

    /**
     * Initializes an instance of ContentUnderstandingAsyncClient class.
     *
     * @param serviceClient the service client implementation.
     */
    @Generated
    ContentUnderstandingAsyncClient(ContentUnderstandingClientImpl serviceClient) {
        this.serviceClient = serviceClient;
    }

    /**
     * Extract content and fields from input.
     * <p><strong>Query Parameters</strong></p>
     * <table border="1">
     * <caption>Query Parameters</caption>
     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
     * response.
     * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.</td></tr>
     * <tr><td>processingLocation</td><td>String</td><td>No</td><td>The location where the data may be processed.
     * Defaults to global. Allowed values: "geography", "dataZone", "global".</td></tr>
     * </table>
     * You can add these to a request with {@link RequestOptions#addQueryParam}
     * <p><strong>Request Body Schema</strong></p>
     *
     * <pre>
     * {@code
     * {
     *     inputs (Optional): [
     *          (Optional){
     *             url: String (Optional)
     *             data: byte[] (Optional)
     *             name: String (Optional)
     *             mimeType: String (Optional)
     *             range: String (Optional)
     *         }
     *     ]
     *     modelDeployments (Optional): {
     *         String: String (Required)
     *     }
     * }
     * }
     * </pre>
     *
     * <p><strong>Response Body Schema</strong></p>
     *
     * <pre>
     * {@code
     * {
     *     id: String (Required)
     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
     *     error (Optional): {
     *         code: String (Required)
     *         message: String (Required)
     *         target: String (Optional)
     *         details (Optional): [
     *             (recursive schema, see above)
     *         ]
     *         innererror (Optional): {
     *             code: String (Optional)
     *             innererror (Optional): (recursive schema, see innererror above)
     *         }
     *     }
     *     result (Optional): {
     *         analyzerId: String (Optional)
     *         apiVersion: String (Optional)
     *         createdAt: OffsetDateTime (Optional)
     *         warnings (Optional): [
     *             (recursive schema, see above)
     *         ]
     *         stringEncoding: String (Optional)
     *         contents (Required): [
     *              (Required){
     *                 kind: String(document/audioVisual) (Required)
     *                 mimeType: String (Required)
     *                 analyzerId: String (Optional)
     *                 category: String (Optional)
     *                 path: String (Optional)
     *                 markdown: String (Optional)
     *                 fields (Optional): {
     *                     String (Required): {
     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
     *                         spans (Optional): [
     *                              (Optional){
     *                                 offset: int (Required)
     *                                 length: int (Required)
     *                             }
     *                         ]
     *                         confidence: Double (Optional)
     *                         source: String (Optional)
     *                     }
     *                 }
     *             }
     *         ]
     *     }
     * }
     * }
     * </pre>
     *
     * @param analyzerId The unique identifier of the analyzer.
     * @param analyzeRequest1 The analyzeRequest1 parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @return the {@link PollerFlux} for polling of provides status details for long running operations.
     */
    @Generated
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    public PollerFlux<BinaryData, BinaryData> beginAnalyze(String analyzerId, BinaryData analyzeRequest1,
        RequestOptions requestOptions) {
        return this.serviceClient.beginAnalyzeAsync(analyzerId, analyzeRequest1, requestOptions);
    }

    /**
     * Extract content and fields from input.
     * <p><strong>Query Parameters</strong></p>
     * <table border="1">
     * <caption>Query Parameters</caption>
     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
     * response.
     * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.</td></tr>
     * <tr><td>processingLocation</td><td>String</td><td>No</td><td>The location where the data may be processed.
     * Defaults to global. Allowed values: "geography", "dataZone", "global".</td></tr>
     * <tr><td>range</td><td>String</td><td>No</td><td>Range of the input to analyze (ex. `1-3,5,9-`). Document content
     * uses 1-based page numbers, while audio visual content uses integer milliseconds.</td></tr>
     * </table>
     * You can add these to a request with {@link RequestOptions#addQueryParam}
     * <p><strong>Request Body Schema</strong></p>
     *
     * <pre>
     * {@code
     * BinaryData
     * }
     * </pre>
     *
     * <p><strong>Response Body Schema</strong></p>
     *
     * <pre>
     * {@code
     * {
     *     id: String (Required)
     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
     *     error (Optional): {
     *         code: String (Required)
     *         message: String (Required)
     *         target: String (Optional)
     *         details (Optional): [
     *             (recursive schema, see above)
     *         ]
     *         innererror (Optional): {
     *             code: String (Optional)
     *             innererror (Optional): (recursive schema, see innererror above)
     *         }
     *     }
     *     result (Optional): {
     *         analyzerId: String (Optional)
     *         apiVersion: String (Optional)
     *         createdAt: OffsetDateTime (Optional)
     *         warnings (Optional): [
     *             (recursive schema, see above)
     *         ]
     *         stringEncoding: String (Optional)
     *         contents (Required): [
     *              (Required){
     *                 kind: String(document/audioVisual) (Required)
     *                 mimeType: String (Required)
     *                 analyzerId: String (Optional)
     *                 category: String (Optional)
     *                 path: String (Optional)
     *                 markdown: String (Optional)
     *                 fields (Optional): {
     *                     String (Required): {
     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
     *                         spans (Optional): [
     *                              (Optional){
     *                                 offset: int (Required)
     *                                 length: int (Required)
     *                             }
     *                         ]
     *                         confidence: Double (Optional)
     *                         source: String (Optional)
     *                     }
     *                 }
     *             }
     *         ]
     *     }
     * }
     * }
     * </pre>
     *
     * @param analyzerId The unique identifier of the analyzer.
     * @param contentType Request content type.
     * @param binaryInput The binary content of the document to analyze.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @return the {@link PollerFlux} for polling of provides status details for long running operations.
     */
    @Generated
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    public PollerFlux<BinaryData, BinaryData> beginAnalyzeBinary(String analyzerId, String contentType,
        BinaryData binaryInput, RequestOptions requestOptions) {
        return this.serviceClient.beginAnalyzeBinaryAsync(analyzerId, contentType, binaryInput, requestOptions);
    }

    /**
     * Create a copy of the source analyzer to the current location.

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCopyAnalyzer(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + return this.serviceClient.beginCopyAnalyzerAsync(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a new analyzer asynchronously. + *

+     * <p><strong>Query Parameters</strong></p>
+     * <table border="1">
+     * <caption>Query Parameters</caption>
+     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
+     * <tr><td>allowReplace</td><td>Boolean</td><td>No</td><td>Allow the operation to replace an existing
+     * resource.</td></tr>
+     * </table>
+     * You can add these to a request with {@link RequestOptions#addQueryParam}
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCreateAnalyzer(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + return this.serviceClient.beginCreateAnalyzerAsync(analyzerId, resource, requestOptions); + } + + /** + * Delete analyzer. + * + * @param analyzerId The unique identifier of the analyzer. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> deleteAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) { + return this.serviceClient.deleteAnalyzerWithResponseAsync(analyzerId, requestOptions); + } + + /** + * Mark the result of an analysis operation for deletion. 
+ * + * @param operationId Operation identifier. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> deleteResultWithResponse(String operationId, RequestOptions requestOptions) { + return this.serviceClient.deleteResultWithResponseAsync(operationId, requestOptions); + } + + /** + * Get analyzer properties. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer properties along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) { + return this.serviceClient.getAnalyzerWithResponseAsync(analyzerId, requestOptions); + } + + /** + * Return default settings for this Content Understanding resource. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return default settings for this Content Understanding resource along with {@link Response} on successful + * completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getDefaultsWithResponse(RequestOptions requestOptions) { + return this.serviceClient.getDefaultsWithResponseAsync(requestOptions); + } + + /** + * Get the status of an analyzer creation operation. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Required)
+     *         description: String (Optional)
+     *         tags (Optional): {
+     *             String: String (Required)
+     *         }
+     *         status: String(creating/ready/deleting/failed) (Required)
+     *         createdAt: OffsetDateTime (Required)
+     *         lastModifiedAt: OffsetDateTime (Required)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         baseAnalyzerId: String (Optional)
+     *         config (Optional): {
+     *             returnDetails: Boolean (Optional)
+     *             locales (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             enableOcr: Boolean (Optional)
+     *             enableLayout: Boolean (Optional)
+     *             enableFigureDescription: Boolean (Optional)
+     *             enableFigureAnalysis: Boolean (Optional)
+     *             enableFormula: Boolean (Optional)
+     *             tableFormat: String(html/markdown) (Optional)
+     *             chartFormat: String(chartJs/markdown) (Optional)
+     *             annotationFormat: String(none/markdown) (Optional)
+     *             disableFaceBlurring: Boolean (Optional)
+     *             estimateFieldSourceAndConfidence: Boolean (Optional)
+     *             contentCategories (Optional): {
+     *                 String (Required): {
+     *                     description: String (Optional)
+     *                     analyzerId: String (Optional)
+     *                     analyzer (Optional): (recursive schema, see analyzer above)
+     *                 }
+     *             }
+     *             enableSegment: Boolean (Optional)
+     *             segmentPerPage: Boolean (Optional)
+     *             omitContent: Boolean (Optional)
+     *         }
+     *         fieldSchema (Optional): {
+     *             name: String (Optional)
+     *             description: String (Optional)
+     *             fields (Optional, Required on create): {
+     *                 String (Required): {
+     *                     method: String(generate/extract/classify) (Optional)
+     *                     type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                     description: String (Optional)
+     *                     items (Optional): (recursive schema, see items above)
+     *                     properties (Optional): {
+     *                         String (Required): (recursive schema, see String above)
+     *                     }
+     *                     examples (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enum (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enumDescriptions (Optional): {
+     *                         String: String (Required)
+     *                     }
+     *                     $ref: String (Optional)
+     *                     estimateSourceAndConfidence: Boolean (Optional)
+     *                 }
+     *             }
+     *             definitions (Optional): {
+     *                 String (Required): (recursive schema, see String above)
+     *             }
+     *         }
+     *         dynamicFieldSchema: Boolean (Optional)
+     *         processingLocation: String(geography/dataZone/global) (Optional)
+     *         knowledgeSources (Optional): [
+     *              (Optional){
+     *                 kind: String(labeledData) (Required)
+     *             }
+     *         ]
+     *         models (Optional): {
+     *             String: String (Required)
+     *         }
+     *         supportedModels (Optional): {
+     *             completion (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             embedding (Optional): [
+     *                 String (Optional)
+     *             ]
+     *         }
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param operationId The unique ID of the operation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the status of an analyzer creation operation along with {@link Response} on successful completion of + * {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getOperationStatusWithResponse(String analyzerId, String operationId, + RequestOptions requestOptions) { + return this.serviceClient.getOperationStatusWithResponseAsync(analyzerId, operationId, requestOptions); + } + + /** + * Get the result of an analysis operation. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param operationId The unique ID of the operation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the result of an analysis operation along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getResultWithResponse(String operationId, RequestOptions requestOptions) { + return this.serviceClient.getResultWithResponseAsync(operationId, requestOptions); + } + + /** + * Get a file associated with the result of an analysis operation. + *
+     *
+     * <p><strong>Response Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + * @param operationId Operation identifier. + * @param path File path. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a file associated with the result of an analysis operation along with {@link Response} on successful + * completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getResultFileWithResponse(String operationId, String path, + RequestOptions requestOptions) { + return this.serviceClient.getResultFileWithResponseAsync(operationId, path, requestOptions); + } + + /** + * Get authorization for copying this analyzer to another location. + *
+     *
+     * <p><strong>Request Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     targetAzureResourceId: String (Required)
+     *     targetRegion: String (Optional)
+     * }
+     * }
+     * 
+ * + *
+     *
+     * <p><strong>Response Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     source: String (Required)
+     *     targetAzureResourceId: String (Required)
+     *     expiresAt: OffsetDateTime (Required)
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param grantCopyAuthorizationRequest1 The grantCopyAuthorizationRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return authorization for copying this analyzer to another location along with {@link Response} on successful + * completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> grantCopyAuthorizationWithResponse(String analyzerId, + BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions) { + return this.serviceClient.grantCopyAuthorizationWithResponseAsync(analyzerId, grantCopyAuthorizationRequest1, + requestOptions); + } + + /** + * List analyzers. + *
+     *
+     * <p><strong>Response Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedFlux}. + */ + @Generated + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listAnalyzers(RequestOptions requestOptions) { + return this.serviceClient.listAnalyzersAsync(requestOptions); + } + + /** + * Update analyzer properties. + *
+     *
+     * <p><strong>Request Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *
+     *
+     * <p><strong>Response Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response} on + * successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> updateAnalyzerWithResponse(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + return this.serviceClient.updateAnalyzerWithResponseAsync(analyzerId, resource, requestOptions); + } + + /** + * Return default settings for this Content Understanding resource. + *
+     *
+     * <p><strong>Request Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Optional): {
+     *          (Optional): {
+     *             String: String (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * 
+ * + *
+     *
+     * <p><strong>Response Body Schema</strong></p>
+     *
+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+     *
+     * @param updateDefaultsRequest The updateDefaultsRequest parameter.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return default settings for this Content Understanding resource along with {@link Response} on successful
+     * completion of {@link Mono}.
+     */
+    @Generated
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> updateDefaultsWithResponse(BinaryData updateDefaultsRequest,
+        RequestOptions requestOptions) {
+        return this.serviceClient.updateDefaultsWithResponseAsync(updateDefaultsRequest, requestOptions);
+    }
+
+    /**
+     * Extract content and fields from input.
+     *
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param stringEncoding The string encoding format for content spans in the response.
+     * Possible values are 'codePoint', 'utf16', and 'utf8'. Default is 'codePoint'.
+     * @param inputs Inputs to analyze. Currently, only pro mode supports multiple inputs.
+     * @param modelDeployments Override default mapping of model names to deployments.
+     * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }.
+     * @param processingLocation The location where the data may be processed. Defaults to global.
+     * @throws IllegalArgumentException thrown if parameters fail the validation.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
+     * @return the {@link PollerFlux} for polling of provides status details for long running operations.
+     */
+    @Generated
+    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
+    PollerFlux beginAnalyze(String analyzerId,
+        String stringEncoding, List inputs, Map modelDeployments,
+        ProcessingLocation processingLocation) {
+        // Generated convenience method for beginAnalyzeWithModel
+        RequestOptions requestOptions = new RequestOptions();
+        AnalyzeRequest1 analyzeRequest1Obj
+            = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments);
+        BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);
+        requestOptions.addQueryParam("stringEncoding", stringEncoding, false);
+        if (processingLocation != null) {
+            requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false);
+        }
+        return serviceClient.beginAnalyzeWithModelAsync(analyzerId, analyzeRequest1, requestOptions);
+    }
+
+    /**
+     * Extract content and fields from input.
+     *
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param stringEncoding The string encoding format for content spans in the response.
+     * Possible values are 'codePoint', 'utf16', and 'utf8'. Default is 'codePoint'.
+     * @throws IllegalArgumentException thrown if parameters fail the validation.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
+     * @return the {@link PollerFlux} for polling of provides status details for long running operations.
+     */
+    @Generated
+    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
+    PollerFlux beginAnalyze(String analyzerId,
+        String stringEncoding) {
+        // Generated convenience method for beginAnalyzeWithModel
+        RequestOptions requestOptions = new RequestOptions();
+        AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1();
+        BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj);
+        requestOptions.addQueryParam("stringEncoding", stringEncoding, false);
+        return serviceClient.beginAnalyzeWithModelAsync(analyzerId, analyzeRequest1, requestOptions);
+    }
+
+    /**
+     * Extract content and fields from input.
+     *
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param binaryInput The binary content of the document to analyze.
+     * @param stringEncoding The string encoding format for content spans in the response.
+     * Possible values are 'codePoint', 'utf16', and 'utf8'. Default is 'codePoint'.
+     * @param inputRange Range of the input to analyze (ex. `1-3,5,9-`). Document content uses 1-based page numbers,
+     * while audio visual content uses integer milliseconds.
+     * @param contentType Request content type.
+     * @param processingLocation The location where the data may be processed. Defaults to global.
+     * @throws IllegalArgumentException thrown if parameters fail the validation.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+ * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + PollerFlux beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String stringEncoding, String inputRange, String contentType, + ProcessingLocation processingLocation) { + // Generated convenience method for beginAnalyzeBinaryWithModel + RequestOptions requestOptions = new RequestOptions(); + requestOptions.addQueryParam("stringEncoding", stringEncoding, false); + if (inputRange != null) { + requestOptions.addQueryParam("range", inputRange, false); + } + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + return serviceClient.beginAnalyzeBinaryWithModelAsync(analyzerId, contentType, binaryInput, requestOptions); + } + + /** + * Create a copy of the source analyzer to the current location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param sourceAnalyzerId Source analyzer ID. + * @param allowReplace Allow the operation to replace an existing resource. + * @param sourceAzureResourceId Azure resource ID of the source analyzer location. Defaults to the current resource. + * @param sourceRegion Azure region of the source analyzer location. Defaults to current region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. 
+ * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCopyAnalyzer(String analyzerId, + String sourceAnalyzerId, Boolean allowReplace, String sourceAzureResourceId, String sourceRegion) { + // Generated convenience method for beginCopyAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + CopyAnalyzerRequest copyAnalyzerRequestObj + = new CopyAnalyzerRequest(sourceAnalyzerId).setSourceAzureResourceId(sourceAzureResourceId) + .setSourceRegion(sourceRegion); + BinaryData copyAnalyzerRequest = BinaryData.fromObject(copyAnalyzerRequestObj); + if (allowReplace != null) { + requestOptions.addQueryParam("allowReplace", String.valueOf(allowReplace), false); + } + return serviceClient.beginCopyAnalyzerWithModelAsync(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a copy of the source analyzer to the current location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param sourceAnalyzerId Source analyzer ID. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCopyAnalyzer(String analyzerId, + String sourceAnalyzerId) { + // Generated convenience method for beginCopyAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + CopyAnalyzerRequest copyAnalyzerRequestObj = new CopyAnalyzerRequest(sourceAnalyzerId); + BinaryData copyAnalyzerRequest = BinaryData.fromObject(copyAnalyzerRequestObj); + return serviceClient.beginCopyAnalyzerWithModelAsync(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a new analyzer asynchronously. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param allowReplace Allow the operation to replace an existing resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCreateAnalyzer(String analyzerId, + ContentAnalyzer resource, Boolean allowReplace) { + // Generated convenience method for beginCreateAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + if (allowReplace != null) { + requestOptions.addQueryParam("allowReplace", String.valueOf(allowReplace), false); + } + return serviceClient.beginCreateAnalyzerWithModelAsync(analyzerId, BinaryData.fromObject(resource), + requestOptions); + } + + /** + * Create a new analyzer asynchronously. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginCreateAnalyzer(String analyzerId, + ContentAnalyzer resource) { + // Generated convenience method for beginCreateAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + return serviceClient.beginCreateAnalyzerWithModelAsync(analyzerId, BinaryData.fromObject(resource), + requestOptions); + } + + /** + * Delete analyzer. + * + * @param analyzerId The unique identifier of the analyzer. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return A {@link Mono} that completes when a successful response is received. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono deleteAnalyzer(String analyzerId) { + // Generated convenience method for deleteAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + return deleteAnalyzerWithResponse(analyzerId, requestOptions).flatMap(FluxUtil::toMono); + } + + /** + * Mark the result of an analysis operation for deletion. + * + * @param operationId Operation identifier. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return A {@link Mono} that completes when a successful response is received. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono deleteResult(String operationId) { + // Generated convenience method for deleteResultWithResponse + RequestOptions requestOptions = new RequestOptions(); + return deleteResultWithResponse(operationId, requestOptions).flatMap(FluxUtil::toMono); + } + + /** + * Get analyzer properties. + * + * @param analyzerId The unique identifier of the analyzer. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return analyzer properties on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono getAnalyzer(String analyzerId) { + // Generated convenience method for getAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getAnalyzerWithResponse(analyzerId, requestOptions).flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(ContentAnalyzer.class)); + } + + /** + * Return default settings for this Content Understanding resource. + * + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return default settings for this Content Understanding resource on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono getDefaults() { + // Generated convenience method for getDefaultsWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getDefaultsWithResponse(requestOptions).flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(ContentUnderstandingDefaults.class)); + } + + /** + * Get the status of an analyzer creation operation. + * + * @param analyzerId The unique identifier of the analyzer. + * @param operationId The unique ID of the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the status of an analyzer creation operation on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono getOperationStatus(String analyzerId, String operationId) { + // Generated convenience method for getOperationStatusWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getOperationStatusWithResponse(analyzerId, operationId, requestOptions).flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(ContentAnalyzerOperationStatus.class)); + } + + /** + * Get the result of an analysis operation. 
+ * + * @param operationId The unique ID of the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the result of an analysis operation on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono getResult(String operationId) { + // Generated convenience method for getResultWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getResultWithResponse(operationId, requestOptions).flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(ContentAnalyzerAnalyzeOperationStatus.class)); + } + + /** + * Get a file associated with the result of an analysis operation. + * + * @param operationId Operation identifier. + * @param path File path. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a file associated with the result of an analysis operation on successful completion of {@link Mono}. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono getResultFile(String operationId, String path) { + // Generated convenience method for getResultFileWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getResultFileWithResponse(operationId, path, requestOptions).flatMap(FluxUtil::toMono); + } + + /** + * Get authorization for copying this analyzer to another location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param targetAzureResourceId Azure resource ID of the target analyzer location. + * @param targetRegion Azure region of the target analyzer location. Defaults to current region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return authorization for copying this analyzer to another location on successful completion of {@link Mono}. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono grantCopyAuthorization(String analyzerId, String targetAzureResourceId, + String targetRegion) { + // Generated convenience method for grantCopyAuthorizationWithResponse + RequestOptions requestOptions = new RequestOptions(); + GrantCopyAuthorizationRequest1 grantCopyAuthorizationRequest1Obj + = new GrantCopyAuthorizationRequest1(targetAzureResourceId).setTargetRegion(targetRegion); + BinaryData grantCopyAuthorizationRequest1 = BinaryData.fromObject(grantCopyAuthorizationRequest1Obj); + return grantCopyAuthorizationWithResponse(analyzerId, grantCopyAuthorizationRequest1, requestOptions) + .flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(CopyAuthorization.class)); + } + + /** + * Get authorization for copying this analyzer to another location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param targetAzureResourceId Azure resource ID of the target analyzer location. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return authorization for copying this analyzer to another location on successful completion of {@link Mono}. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono grantCopyAuthorization(String analyzerId, String targetAzureResourceId) { + // Generated convenience method for grantCopyAuthorizationWithResponse + RequestOptions requestOptions = new RequestOptions(); + GrantCopyAuthorizationRequest1 grantCopyAuthorizationRequest1Obj + = new GrantCopyAuthorizationRequest1(targetAzureResourceId); + BinaryData grantCopyAuthorizationRequest1 = BinaryData.fromObject(grantCopyAuthorizationRequest1Obj); + return grantCopyAuthorizationWithResponse(analyzerId, grantCopyAuthorizationRequest1, requestOptions) + .flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(CopyAuthorization.class)); + } + + /** + * List analyzers. + * + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedFlux}. + */ + @Generated + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listAnalyzers() { + // Generated convenience method for listAnalyzers + RequestOptions requestOptions = new RequestOptions(); + PagedFlux pagedFluxResponse = listAnalyzers(requestOptions); + return PagedFlux.create(() -> (continuationTokenParam, pageSizeParam) -> { + Flux> flux = (continuationTokenParam == null) + ? 
pagedFluxResponse.byPage().take(1) + : pagedFluxResponse.byPage(continuationTokenParam).take(1); + return flux.map(pagedResponse -> new PagedResponseBase(pagedResponse.getRequest(), + pagedResponse.getStatusCode(), pagedResponse.getHeaders(), + pagedResponse.getValue() + .stream() + .map(protocolMethodData -> protocolMethodData.toObject(ContentAnalyzer.class)) + .collect(Collectors.toList()), + pagedResponse.getContinuationToken(), null)); + }); + } + + /** + * Update analyzer properties. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return analyzer that extracts content and fields from multimodal documents on successful completion of + * {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono updateAnalyzer(String analyzerId, ContentAnalyzer resource) { + // Generated convenience method for updateAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(resource, true); + BinaryData resourceInBinaryData = BinaryData.fromObject(resource); + // BinaryData.fromObject() will not fire serialization, use getLength() to fire serialization. 
+ resourceInBinaryData.getLength(); + JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(resource, false); + return updateAnalyzerWithResponse(analyzerId, resourceInBinaryData, requestOptions).flatMap(FluxUtil::toMono) + .map(protocolMethodData -> protocolMethodData.toObject(ContentAnalyzer.class)); + } + + /** + * Update default model deployment settings. + * + * This is the recommended public API for updating default model deployment settings. This method provides a simpler + * API that accepts a Map of model names to deployment names. + * + * @param modelDeployments Mapping of model names to deployment names. For example: { "gpt-4.1": + * "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + * @return the updated ContentUnderstandingDefaults on successful completion of {@link Mono}. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + public Mono updateDefaults(Map modelDeployments) { + ContentUnderstandingDefaults defaults = new ContentUnderstandingDefaults(modelDeployments); + return updateDefaultsWithResponse(BinaryData.fromObject(defaults), null) + .map(response -> response.getValue().toObject(ContentUnderstandingDefaults.class)); + } + + /** + * Update default model deployment settings. + * + * This is a convenience method that accepts a ContentUnderstandingDefaults object. + * + * @param defaults The ContentUnderstandingDefaults instance with settings to update. + * @return the updated ContentUnderstandingDefaults on successful completion of {@link Mono}. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ */ + public Mono updateDefaults(ContentUnderstandingDefaults defaults) { + return updateDefaultsWithResponse(BinaryData.fromObject(defaults), null) + .map(response -> response.getValue().toObject(ContentUnderstandingDefaults.class)); + } + + /** + * Extract content and fields from binary input. Uses default content type (application/octet-stream), default + * string encoding (utf16), and service default processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @return the {@link PollerFlux} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput) { + return beginAnalyzeBinary(analyzerId, binaryInput, null, "application/octet-stream", null); + } + + /** + * Extract content and fields from binary input. Uses default content type (application/octet-stream), default + * string encoding (utf16), and service default processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @param inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio + * visual uses milliseconds. + * @return the {@link PollerFlux} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String inputRange) { + return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, "application/octet-stream", null); + } + + /** + * Extract content and fields from binary input. Uses default string encoding (utf16). + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @param inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio + * visual uses milliseconds. + * @param contentType Request content type. + * @param processingLocation The location where the data may be processed. Set to null for service default. + * @return the {@link PollerFlux} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String inputRange, String contentType, ProcessingLocation processingLocation) { + RequestOptions requestOptions = new RequestOptions(); + if (inputRange != null) { + requestOptions.addQueryParam("range", inputRange, false); + } + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + requestOptions.addQueryParam("stringEncoding", "utf16", false); + return serviceClient.beginAnalyzeBinaryWithModelAsync(analyzerId, contentType, binaryInput, requestOptions); + } + + /** + * Extract content and fields from inputs. Uses default string encoding (utf16), service default model deployments, + * and global processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param inputs The inputs to analyze. 
+ * @return the {@link PollerFlux} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyze(String analyzerId, + List inputs) { + return beginAnalyze(analyzerId, inputs, null, null); + } + + /** + * Extract content and fields from inputs. Uses default string encoding (utf16). + * + * @param analyzerId The unique identifier of the analyzer. + * @param inputs The inputs to analyze. + * @param modelDeployments Custom model deployment mappings. Set to null to use service defaults. + * @param processingLocation The processing location for the analysis. Set to null to use the service default. + * @return the {@link PollerFlux} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyze(String analyzerId, + List inputs, Map modelDeployments, ProcessingLocation processingLocation) { + RequestOptions requestOptions = new RequestOptions(); + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + requestOptions.addQueryParam("stringEncoding", "utf16", false); + AnalyzeRequest1 analyzeRequest1Obj + = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments); + BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj); + return serviceClient.beginAnalyzeWithModelAsync(analyzerId, analyzeRequest1, requestOptions); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClient.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClient.java new file mode 100644 index 000000000000..fe4efcbc8099 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClient.java @@ -0,0 +1,2074 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+package com.azure.ai.contentunderstanding; + +import com.azure.ai.contentunderstanding.implementation.ContentUnderstandingClientImpl; +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1; +import com.azure.ai.contentunderstanding.implementation.models.CopyAnalyzerRequest; +import com.azure.ai.contentunderstanding.implementation.models.GrantCopyAuthorizationRequest1; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults; +import com.azure.ai.contentunderstanding.models.CopyAuthorization; +import com.azure.ai.contentunderstanding.models.ProcessingLocation; +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceClient; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.exception.ClientAuthenticationException; +import com.azure.core.exception.HttpResponseException; +import com.azure.core.exception.ResourceModifiedException; +import com.azure.core.exception.ResourceNotFoundException; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.RequestOptions; +import com.azure.core.http.rest.Response; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.SyncPoller; +import java.util.List; +import java.util.Map; + +/** + * Initializes a new instance of the synchronous ContentUnderstandingClient type. 
+ */ +@ServiceClient(builder = ContentUnderstandingClientBuilder.class) +public final class ContentUnderstandingClient { + + @Generated + private final ContentUnderstandingClientImpl serviceClient; + + /** + * Initializes an instance of ContentUnderstandingClient class. + * + * @param serviceClient the service client implementation. + */ + @Generated + ContentUnderstandingClient(ContentUnderstandingClientImpl serviceClient) { + this.serviceClient = serviceClient; + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyze(String analyzerId, BinaryData analyzeRequest1, + RequestOptions requestOptions) { + return this.serviceClient.beginAnalyze(analyzerId, analyzeRequest1, requestOptions); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinary(String analyzerId, String contentType, + BinaryData binaryInput, RequestOptions requestOptions) { + return this.serviceClient.beginAnalyzeBinary(analyzerId, contentType, binaryInput, requestOptions); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCopyAnalyzer(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + return this.serviceClient.beginCopyAnalyzer(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a new analyzer asynchronously. + *

+     * <p><strong>Query Parameters</strong></p>
+     * <table border="1">
+     * <caption>Query Parameters</caption>
+     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
+     * <tr><td>allowReplace</td><td>Boolean</td><td>No</td><td>Allow the operation to replace an existing
+     * resource.</td></tr>
+     * </table>
+     * You can add these to a request with {@link RequestOptions#addQueryParam}
+     * 

+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCreateAnalyzer(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + return this.serviceClient.beginCreateAnalyzer(analyzerId, resource, requestOptions); + } + + /** + * Delete analyzer. + * + * @param analyzerId The unique identifier of the analyzer. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response deleteAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) { + return this.serviceClient.deleteAnalyzerWithResponse(analyzerId, requestOptions); + } + + /** + * Mark the result of an analysis operation for deletion. + * + * @param operationId Operation identifier. 
+ * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response deleteResultWithResponse(String operationId, RequestOptions requestOptions) { + return this.serviceClient.deleteResultWithResponse(operationId, requestOptions); + } + + /** + * Get analyzer properties. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer properties along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) { + return this.serviceClient.getAnalyzerWithResponse(analyzerId, requestOptions); + } + + /** + * Return default settings for this Content Understanding resource. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return default settings for this Content Understanding resource along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getDefaultsWithResponse(RequestOptions requestOptions) { + return this.serviceClient.getDefaultsWithResponse(requestOptions); + } + + /** + * Get the status of an analyzer creation operation. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Required)
+     *         description: String (Optional)
+     *         tags (Optional): {
+     *             String: String (Required)
+     *         }
+     *         status: String(creating/ready/deleting/failed) (Required)
+     *         createdAt: OffsetDateTime (Required)
+     *         lastModifiedAt: OffsetDateTime (Required)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         baseAnalyzerId: String (Optional)
+     *         config (Optional): {
+     *             returnDetails: Boolean (Optional)
+     *             locales (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             enableOcr: Boolean (Optional)
+     *             enableLayout: Boolean (Optional)
+     *             enableFigureDescription: Boolean (Optional)
+     *             enableFigureAnalysis: Boolean (Optional)
+     *             enableFormula: Boolean (Optional)
+     *             tableFormat: String(html/markdown) (Optional)
+     *             chartFormat: String(chartJs/markdown) (Optional)
+     *             annotationFormat: String(none/markdown) (Optional)
+     *             disableFaceBlurring: Boolean (Optional)
+     *             estimateFieldSourceAndConfidence: Boolean (Optional)
+     *             contentCategories (Optional): {
+     *                 String (Required): {
+     *                     description: String (Optional)
+     *                     analyzerId: String (Optional)
+     *                     analyzer (Optional): (recursive schema, see analyzer above)
+     *                 }
+     *             }
+     *             enableSegment: Boolean (Optional)
+     *             segmentPerPage: Boolean (Optional)
+     *             omitContent: Boolean (Optional)
+     *         }
+     *         fieldSchema (Optional): {
+     *             name: String (Optional)
+     *             description: String (Optional)
+     *             fields (Optional, Required on create): {
+     *                 String (Required): {
+     *                     method: String(generate/extract/classify) (Optional)
+     *                     type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                     description: String (Optional)
+     *                     items (Optional): (recursive schema, see items above)
+     *                     properties (Optional): {
+     *                         String (Required): (recursive schema, see String above)
+     *                     }
+     *                     examples (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enum (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enumDescriptions (Optional): {
+     *                         String: String (Required)
+     *                     }
+     *                     $ref: String (Optional)
+     *                     estimateSourceAndConfidence: Boolean (Optional)
+     *                 }
+     *             }
+     *             definitions (Optional): {
+     *                 String (Required): (recursive schema, see String above)
+     *             }
+     *         }
+     *         dynamicFieldSchema: Boolean (Optional)
+     *         processingLocation: String(geography/dataZone/global) (Optional)
+     *         knowledgeSources (Optional): [
+     *              (Optional){
+     *                 kind: String(labeledData) (Required)
+     *             }
+     *         ]
+     *         models (Optional): {
+     *             String: String (Required)
+     *         }
+     *         supportedModels (Optional): {
+     *             completion (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             embedding (Optional): [
+     *                 String (Optional)
+     *             ]
+     *         }
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param operationId The unique ID of the operation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the status of an analyzer creation operation along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getOperationStatusWithResponse(String analyzerId, String operationId, + RequestOptions requestOptions) { + return this.serviceClient.getOperationStatusWithResponse(analyzerId, operationId, requestOptions); + } + + /** + * Get the result of an analysis operation. + *

+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+ * + * @param operationId The unique ID of the operation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the result of an analysis operation along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getResultWithResponse(String operationId, RequestOptions requestOptions) { + return this.serviceClient.getResultWithResponse(operationId, requestOptions); + } + + /** + * Get a file associated with the result of an analysis operation. + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + * @param operationId Operation identifier. + * @param path File path. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a file associated with the result of an analysis operation along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getResultFileWithResponse(String operationId, String path, + RequestOptions requestOptions) { + return this.serviceClient.getResultFileWithResponse(operationId, path, requestOptions); + } + + /** + * Get authorization for copying this analyzer to another location. + *

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     targetAzureResourceId: String (Required)
+     *     targetRegion: String (Optional)
+     * }
+     * }
+     * 
+ * + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     source: String (Required)
+     *     targetAzureResourceId: String (Required)
+     *     expiresAt: OffsetDateTime (Required)
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param grantCopyAuthorizationRequest1 The grantCopyAuthorizationRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return authorization for copying this analyzer to another location along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response grantCopyAuthorizationWithResponse(String analyzerId, + BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions) { + return this.serviceClient.grantCopyAuthorizationWithResponse(analyzerId, grantCopyAuthorizationRequest1, + requestOptions); + } + + /** + * List analyzers. + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedIterable}. + */ + @Generated + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listAnalyzers(RequestOptions requestOptions) { + return this.serviceClient.listAnalyzers(requestOptions); + } + + /** + * Update analyzer properties. + *

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response updateAnalyzerWithResponse(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + return this.serviceClient.updateAnalyzerWithResponse(analyzerId, resource, requestOptions); + } + + /** + * Return default settings for this Content Understanding resource. + *

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Optional): {
+     *          (Optional): {
+     *             String: String (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * 
+ * + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + * @param updateDefaultsRequest The updateDefaultsRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return default settings for this Content Understanding resource along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public Response updateDefaultsWithResponse(BinaryData updateDefaultsRequest, + RequestOptions requestOptions) { + return this.serviceClient.updateDefaultsWithResponse(updateDefaultsRequest, requestOptions); + } + + /** + * Extract content and fields from input. + * + * @param analyzerId The unique identifier of the analyzer. + * @param stringEncoding The string encoding format for content spans in the response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`."). + * @param inputs Inputs to analyze. Currently, only pro mode supports multiple inputs. + * @param modelDeployments Override default mapping of model names to deployments. + * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + * @param processingLocation The location where the data may be processed. Defaults to global. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. 
+ * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller beginAnalyze(String analyzerId, + String stringEncoding, List inputs, Map modelDeployments, + ProcessingLocation processingLocation) { + // Generated convenience method for beginAnalyzeWithModel + RequestOptions requestOptions = new RequestOptions(); + AnalyzeRequest1 analyzeRequest1Obj + = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments); + BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj); + requestOptions.addQueryParam("stringEncoding", stringEncoding, false); + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + return serviceClient.beginAnalyzeWithModel(analyzerId, analyzeRequest1, requestOptions); + } + + /** + * Extract content and fields from input. + * + * @param analyzerId The unique identifier of the analyzer. + * @param stringEncoding The string encoding format for content spans in the response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`."). + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller beginAnalyze(String analyzerId, + String stringEncoding) { + // Generated convenience method for beginAnalyzeWithModel + RequestOptions requestOptions = new RequestOptions(); + AnalyzeRequest1 analyzeRequest1Obj = new AnalyzeRequest1(); + BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj); + requestOptions.addQueryParam("stringEncoding", stringEncoding, false); + return serviceClient.beginAnalyzeWithModel(analyzerId, analyzeRequest1, requestOptions); + } + + /** + * Extract content and fields from input. + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @param stringEncoding The string encoding format for content spans in the response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`."). + * @param inputRange Range of the input to analyze (ex. `1-3,5,9-`). Document content uses 1-based page numbers, + * while audio visual content uses integer milliseconds. + * @param contentType Request content type. + * @param processingLocation The location where the data may be processed. Defaults to global. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String stringEncoding, String inputRange, String contentType, + ProcessingLocation processingLocation) { + // Generated convenience method for beginAnalyzeBinaryWithModel + RequestOptions requestOptions = new RequestOptions(); + requestOptions.addQueryParam("stringEncoding", stringEncoding, false); + if (inputRange != null) { + requestOptions.addQueryParam("range", inputRange, false); + } + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + return serviceClient.beginAnalyzeBinaryWithModel(analyzerId, contentType, binaryInput, requestOptions); + } + + /** + * Create a copy of the source analyzer to the current location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param sourceAnalyzerId Source analyzer ID. + * @param allowReplace Allow the operation to replace an existing resource. + * @param sourceAzureResourceId Azure resource ID of the source analyzer location. Defaults to the current resource. + * @param sourceRegion Azure region of the source analyzer location. Defaults to current region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCopyAnalyzer(String analyzerId, + String sourceAnalyzerId, Boolean allowReplace, String sourceAzureResourceId, String sourceRegion) { + // Generated convenience method for beginCopyAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + CopyAnalyzerRequest copyAnalyzerRequestObj + = new CopyAnalyzerRequest(sourceAnalyzerId).setSourceAzureResourceId(sourceAzureResourceId) + .setSourceRegion(sourceRegion); + BinaryData copyAnalyzerRequest = BinaryData.fromObject(copyAnalyzerRequestObj); + if (allowReplace != null) { + requestOptions.addQueryParam("allowReplace", String.valueOf(allowReplace), false); + } + return serviceClient.beginCopyAnalyzerWithModel(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a copy of the source analyzer to the current location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param sourceAnalyzerId Source analyzer ID. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCopyAnalyzer(String analyzerId, + String sourceAnalyzerId) { + // Generated convenience method for beginCopyAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + CopyAnalyzerRequest copyAnalyzerRequestObj = new CopyAnalyzerRequest(sourceAnalyzerId); + BinaryData copyAnalyzerRequest = BinaryData.fromObject(copyAnalyzerRequestObj); + return serviceClient.beginCopyAnalyzerWithModel(analyzerId, copyAnalyzerRequest, requestOptions); + } + + /** + * Create a new analyzer asynchronously. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param allowReplace Allow the operation to replace an existing resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCreateAnalyzer(String analyzerId, + ContentAnalyzer resource, Boolean allowReplace) { + // Generated convenience method for beginCreateAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + if (allowReplace != null) { + requestOptions.addQueryParam("allowReplace", String.valueOf(allowReplace), false); + } + return serviceClient.beginCreateAnalyzerWithModel(analyzerId, BinaryData.fromObject(resource), requestOptions); + } + + /** + * Create a new analyzer asynchronously. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginCreateAnalyzer(String analyzerId, + ContentAnalyzer resource) { + // Generated convenience method for beginCreateAnalyzerWithModel + RequestOptions requestOptions = new RequestOptions(); + return serviceClient.beginCreateAnalyzerWithModel(analyzerId, BinaryData.fromObject(resource), requestOptions); + } + + /** + * Delete analyzer. + * + * @param analyzerId The unique identifier of the analyzer. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public void deleteAnalyzer(String analyzerId) { + // Generated convenience method for deleteAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + deleteAnalyzerWithResponse(analyzerId, requestOptions).getValue(); + } + + /** + * Mark the result of an analysis operation for deletion. + * + * @param operationId Operation identifier. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public void deleteResult(String operationId) { + // Generated convenience method for deleteResultWithResponse + RequestOptions requestOptions = new RequestOptions(); + deleteResultWithResponse(operationId, requestOptions).getValue(); + } + + /** + * Get analyzer properties. + * + * @param analyzerId The unique identifier of the analyzer. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return analyzer properties. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public ContentAnalyzer getAnalyzer(String analyzerId) { + // Generated convenience method for getAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getAnalyzerWithResponse(analyzerId, requestOptions).getValue().toObject(ContentAnalyzer.class); + } + + /** + * Return default settings for this Content Understanding resource. + * + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return default settings for this Content Understanding resource. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public ContentUnderstandingDefaults getDefaults() { + // Generated convenience method for getDefaultsWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getDefaultsWithResponse(requestOptions).getValue().toObject(ContentUnderstandingDefaults.class); + } + + /** + * Get the status of an analyzer creation operation. + * + * @param analyzerId The unique identifier of the analyzer. 
+ * @param operationId The unique ID of the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the status of an analyzer creation operation. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + ContentAnalyzerOperationStatus getOperationStatus(String analyzerId, String operationId) { + // Generated convenience method for getOperationStatusWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getOperationStatusWithResponse(analyzerId, operationId, requestOptions).getValue() + .toObject(ContentAnalyzerOperationStatus.class); + } + + /** + * Get the result of an analysis operation. + * + * @param operationId The unique ID of the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the result of an analysis operation. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + ContentAnalyzerAnalyzeOperationStatus getResult(String operationId) { + // Generated convenience method for getResultWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getResultWithResponse(operationId, requestOptions).getValue() + .toObject(ContentAnalyzerAnalyzeOperationStatus.class); + } + + /** + * Get a file associated with the result of an analysis operation. + * + * @param operationId Operation identifier. + * @param path File path. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a file associated with the result of an analysis operation. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public BinaryData getResultFile(String operationId, String path) { + // Generated convenience method for getResultFileWithResponse + RequestOptions requestOptions = new RequestOptions(); + return getResultFileWithResponse(operationId, path, requestOptions).getValue(); + } + + /** + * Get authorization for copying this analyzer to another location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param targetAzureResourceId Azure resource ID of the target analyzer location. + * @param targetRegion Azure region of the target analyzer location. Defaults to current region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return authorization for copying this analyzer to another location. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public CopyAuthorization grantCopyAuthorization(String analyzerId, String targetAzureResourceId, + String targetRegion) { + // Generated convenience method for grantCopyAuthorizationWithResponse + RequestOptions requestOptions = new RequestOptions(); + GrantCopyAuthorizationRequest1 grantCopyAuthorizationRequest1Obj + = new GrantCopyAuthorizationRequest1(targetAzureResourceId).setTargetRegion(targetRegion); + BinaryData grantCopyAuthorizationRequest1 = BinaryData.fromObject(grantCopyAuthorizationRequest1Obj); + return grantCopyAuthorizationWithResponse(analyzerId, grantCopyAuthorizationRequest1, requestOptions).getValue() + .toObject(CopyAuthorization.class); + } + + /** + * Get authorization for copying this analyzer to another location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param targetAzureResourceId Azure resource ID of the target analyzer location. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return authorization for copying this analyzer to another location. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public CopyAuthorization grantCopyAuthorization(String analyzerId, String targetAzureResourceId) { + // Generated convenience method for grantCopyAuthorizationWithResponse + RequestOptions requestOptions = new RequestOptions(); + GrantCopyAuthorizationRequest1 grantCopyAuthorizationRequest1Obj + = new GrantCopyAuthorizationRequest1(targetAzureResourceId); + BinaryData grantCopyAuthorizationRequest1 = BinaryData.fromObject(grantCopyAuthorizationRequest1Obj); + return grantCopyAuthorizationWithResponse(analyzerId, grantCopyAuthorizationRequest1, requestOptions).getValue() + .toObject(CopyAuthorization.class); + } + + /** + * List analyzers. + * + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedIterable}. + */ + @Generated + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listAnalyzers() { + // Generated convenience method for listAnalyzers + RequestOptions requestOptions = new RequestOptions(); + return serviceClient.listAnalyzers(requestOptions) + .mapPage(bodyItemValue -> bodyItemValue.toObject(ContentAnalyzer.class)); + } + + /** + * Update analyzer properties. + * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return analyzer that extracts content and fields from multimodal documents. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + public ContentAnalyzer updateAnalyzer(String analyzerId, ContentAnalyzer resource) { + // Generated convenience method for updateAnalyzerWithResponse + RequestOptions requestOptions = new RequestOptions(); + JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(resource, true); + BinaryData resourceInBinaryData = BinaryData.fromObject(resource); + // BinaryData.fromObject() will not fire serialization, use getLength() to fire serialization. + resourceInBinaryData.getLength(); + JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(resource, false); + return updateAnalyzerWithResponse(analyzerId, resourceInBinaryData, requestOptions).getValue() + .toObject(ContentAnalyzer.class); + } + + /** + * Update default model deployment settings. + * + * This is the recommended public API for updating default model deployment settings. This method provides a simpler + * API that accepts a Map of model names to deployment names. + * + * @param modelDeployments Mapping of model names to deployment names. For example: { "gpt-4.1": + * "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + * @return the updated ContentUnderstandingDefaults. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + public ContentUnderstandingDefaults updateDefaults(Map modelDeployments) { + ContentUnderstandingDefaults defaults = new ContentUnderstandingDefaults(modelDeployments); + Response response = updateDefaultsWithResponse(BinaryData.fromObject(defaults), null); + return response.getValue().toObject(ContentUnderstandingDefaults.class); + } + + /** + * Update default model deployment settings. + * + * This is a convenience method that accepts a ContentUnderstandingDefaults object. + * + * @param defaults The ContentUnderstandingDefaults instance with settings to update. + * @return the updated ContentUnderstandingDefaults. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + public ContentUnderstandingDefaults updateDefaults(ContentUnderstandingDefaults defaults) { + Response response = updateDefaultsWithResponse(BinaryData.fromObject(defaults), null); + return response.getValue().toObject(ContentUnderstandingDefaults.class); + } + + /** + * Extract content and fields from binary input. Uses default content type (application/octet-stream), default + * string encoding (utf16), and service default processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @return the {@link SyncPoller} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput) { + return beginAnalyzeBinary(analyzerId, binaryInput, null, "application/octet-stream", null); + } + + /** + * Extract content and fields from binary input. Uses default content type (application/octet-stream), default + * string encoding (utf16), and service default processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @param inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio + * visual uses milliseconds. + * @return the {@link SyncPoller} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String inputRange) { + return beginAnalyzeBinary(analyzerId, binaryInput, inputRange, "application/octet-stream", null); + } + + /** + * Extract content and fields from binary input. Uses default string encoding (utf16). + * + * @param analyzerId The unique identifier of the analyzer. + * @param binaryInput The binary content of the document to analyze. + * @param inputRange Range of the input to analyze (ex. 1-3,5,9-). Document content uses 1-based page numbers; audio + * visual uses milliseconds. + * @param contentType Request content type. + * @param processingLocation The location where the data may be processed. Set to null for service default. + * @return the {@link SyncPoller} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinary(String analyzerId, + BinaryData binaryInput, String inputRange, String contentType, ProcessingLocation processingLocation) { + RequestOptions requestOptions = new RequestOptions(); + if (inputRange != null) { + requestOptions.addQueryParam("range", inputRange, false); + } + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + requestOptions.addQueryParam("stringEncoding", "utf16", false); + return serviceClient.beginAnalyzeBinaryWithModel(analyzerId, contentType, binaryInput, requestOptions); + } + + /** + * Extract content and fields from inputs. Uses default string encoding (utf16), service default model deployments, + * and global processing location. + * + * @param analyzerId The unique identifier of the analyzer. + * @param inputs The inputs to analyze. + * @return the {@link SyncPoller} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyze(String analyzerId, + List inputs) { + return beginAnalyze(analyzerId, inputs, null, null); + } + + /** + * Extract content and fields from inputs. Uses default string encoding (utf16). + * + * @param analyzerId The unique identifier of the analyzer. + * @param inputs The inputs to analyze. + * @param modelDeployments Custom model deployment mappings. Set to null to use service defaults. + * @param processingLocation The processing location for the analysis. Set to null to use the service default. + * @return the {@link SyncPoller} for polling of the analyze operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws HttpResponseException thrown if the request is rejected by server. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyze(String analyzerId, + List inputs, Map modelDeployments, ProcessingLocation processingLocation) { + RequestOptions requestOptions = new RequestOptions(); + if (processingLocation != null) { + requestOptions.addQueryParam("processingLocation", processingLocation.toString(), false); + } + requestOptions.addQueryParam("stringEncoding", "utf16", false); + AnalyzeRequest1 analyzeRequest1Obj + = new AnalyzeRequest1().setInputs(inputs).setModelDeployments(modelDeployments); + BinaryData analyzeRequest1 = BinaryData.fromObject(analyzeRequest1Obj); + return serviceClient.beginAnalyzeWithModel(analyzerId, analyzeRequest1, requestOptions); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClientBuilder.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClientBuilder.java new file mode 100644 index 000000000000..ba2679ff465c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClientBuilder.java @@ -0,0 +1,356 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
package com.azure.ai.contentunderstanding;

import com.azure.ai.contentunderstanding.implementation.ContentUnderstandingClientImpl;
import com.azure.core.annotation.Generated;
import com.azure.core.annotation.ServiceClientBuilder;
import com.azure.core.client.traits.ConfigurationTrait;
import com.azure.core.client.traits.EndpointTrait;
import com.azure.core.client.traits.HttpTrait;
import com.azure.core.client.traits.KeyCredentialTrait;
import com.azure.core.client.traits.TokenCredentialTrait;
import com.azure.core.credential.KeyCredential;
import com.azure.core.credential.TokenCredential;
import com.azure.core.http.HttpClient;
import com.azure.core.http.HttpHeaders;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.HttpPipelinePosition;
import com.azure.core.http.policy.AddDatePolicy;
import com.azure.core.http.policy.AddHeadersFromContextPolicy;
import com.azure.core.http.policy.AddHeadersPolicy;
import com.azure.core.http.policy.BearerTokenAuthenticationPolicy;
import com.azure.core.http.policy.HttpLogOptions;
import com.azure.core.http.policy.HttpLoggingPolicy;
import com.azure.core.http.policy.HttpPipelinePolicy;
import com.azure.core.http.policy.HttpPolicyProviders;
import com.azure.core.http.policy.KeyCredentialPolicy;
import com.azure.core.http.policy.RequestIdPolicy;
import com.azure.core.http.policy.RetryOptions;
import com.azure.core.http.policy.RetryPolicy;
import com.azure.core.http.policy.UserAgentPolicy;
import com.azure.core.util.ClientOptions;
import com.azure.core.util.Configuration;
import com.azure.core.util.CoreUtils;
import com.azure.core.util.builder.ClientBuilderUtil;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.serializer.JacksonAdapter;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * A builder for creating a new instance of the ContentUnderstandingClient type.
 */
@ServiceClientBuilder(serviceClients = { ContentUnderstandingClient.class, ContentUnderstandingAsyncClient.class })
public final class ContentUnderstandingClientBuilder
    implements HttpTrait<ContentUnderstandingClientBuilder>, ConfigurationTrait<ContentUnderstandingClientBuilder>,
    TokenCredentialTrait<ContentUnderstandingClientBuilder>, KeyCredentialTrait<ContentUnderstandingClientBuilder>,
    EndpointTrait<ContentUnderstandingClientBuilder> {
    @Generated
    private static final String SDK_NAME = "name";

    @Generated
    private static final String SDK_VERSION = "version";

    @Generated
    private static final String[] DEFAULT_SCOPES = new String[] { "https://cognitiveservices.azure.com/.default" };

    @Generated
    private static final Map<String, String> PROPERTIES
        = CoreUtils.getProperties("azure-ai-contentunderstanding.properties");

    @Generated
    private final List<HttpPipelinePolicy> pipelinePolicies;

    /**
     * Create an instance of the ContentUnderstandingClientBuilder.
     */
    @Generated
    public ContentUnderstandingClientBuilder() {
        this.pipelinePolicies = new ArrayList<>();
    }

    /*
     * The HTTP client used to send the request.
     */
    @Generated
    private HttpClient httpClient;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }

    /*
     * The HTTP pipeline to send requests through.
     */
    @Generated
    private HttpPipeline pipeline;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            LOGGER.atInfo().log("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.pipeline = pipeline;
        return this;
    }

    /*
     * The logging configuration for HTTP requests and responses.
     */
    @Generated
    private HttpLogOptions httpLogOptions;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }

    /*
     * The client options such as application ID and custom headers to set on a request.
     */
    @Generated
    private ClientOptions clientOptions;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /*
     * The retry options to configure retry policy for failed requests.
     */
    @Generated
    private RetryOptions retryOptions;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder retryOptions(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
        pipelinePolicies.add(customPolicy);
        return this;
    }

    /*
     * The configuration store that is used during construction of the service client.
     */
    @Generated
    private Configuration configuration;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /*
     * The TokenCredential used for authentication.
     */
    @Generated
    private TokenCredential tokenCredential;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = tokenCredential;
        return this;
    }

    /*
     * The KeyCredential used for authentication.
     */
    @Generated
    private KeyCredential keyCredential;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder credential(KeyCredential keyCredential) {
        this.keyCredential = keyCredential;
        return this;
    }

    /*
     * The service endpoint
     */
    @Generated
    private String endpoint;

    /**
     * {@inheritDoc}.
     */
    @Generated
    @Override
    public ContentUnderstandingClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /*
     * Service version
     */
    @Generated
    private ContentUnderstandingServiceVersion serviceVersion;

    /**
     * Sets Service version.
     *
     * @param serviceVersion the serviceVersion value.
     * @return the ContentUnderstandingClientBuilder.
     */
    @Generated
    public ContentUnderstandingClientBuilder serviceVersion(ContentUnderstandingServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }

    /*
     * The retry policy that will attempt to retry failed requests, if applicable.
     */
    @Generated
    private RetryPolicy retryPolicy;

    /**
     * Sets The retry policy that will attempt to retry failed requests, if applicable.
     *
     * @param retryPolicy the retryPolicy value.
     * @return the ContentUnderstandingClientBuilder.
     */
    @Generated
    public ContentUnderstandingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Builds an instance of ContentUnderstandingClientImpl with the provided parameters.
     *
     * @return an instance of ContentUnderstandingClientImpl.
     */
    @Generated
    private ContentUnderstandingClientImpl buildInnerClient() {
        this.validateClient();
        // An explicitly supplied pipeline takes precedence over building one from the configured policies.
        HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline();
        ContentUnderstandingServiceVersion localServiceVersion
            = (serviceVersion != null) ? serviceVersion : ContentUnderstandingServiceVersion.getLatest();
        ContentUnderstandingClientImpl client = new ContentUnderstandingClientImpl(localPipeline,
            JacksonAdapter.createDefaultSerializerAdapter(), this.endpoint, localServiceVersion);
        return client;
    }

    @Generated
    private void validateClient() {
        // This method is invoked from 'buildInnerClient'/'buildClient' method.
        // Developer can customize this method, to validate that the necessary conditions are met for the new client.
        Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    }

    @Generated
    private HttpPipeline createHttpPipeline() {
        Configuration buildConfiguration
            = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
        HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions;
        ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
        policies.add(new RequestIdPolicy());
        policies.add(new AddHeadersFromContextPolicy());
        HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(localClientOptions);
        if (headers != null) {
            policies.add(new AddHeadersPolicy(headers));
        }
        // Custom PER_CALL policies run before the retry policy; PER_RETRY policies are added after it below.
        this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)
            .forEach(p -> policies.add(p));
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
        policies.add(new AddDatePolicy());
        if (keyCredential != null) {
            policies.add(new KeyCredentialPolicy("Ocp-Apim-Subscription-Key", keyCredential));
        }
        if (tokenCredential != null) {
            policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES));
        }
        this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)
            .forEach(p -> policies.add(p));
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(localHttpLogOptions));
        HttpPipeline httpPipeline = new HttpPipelineBuilder().policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .clientOptions(localClientOptions)
            .build();
        return httpPipeline;
    }

    /**
     * Builds an instance of ContentUnderstandingAsyncClient class.
     *
     * @return an instance of ContentUnderstandingAsyncClient.
     */
    @Generated
    public ContentUnderstandingAsyncClient buildAsyncClient() {
        return new ContentUnderstandingAsyncClient(buildInnerClient());
    }

    /**
     * Builds an instance of ContentUnderstandingClient class.
     *
     * @return an instance of ContentUnderstandingClient.
     */
    @Generated
    public ContentUnderstandingClient buildClient() {
        return new ContentUnderstandingClient(buildInnerClient());
    }

    private static final ClientLogger LOGGER = new ClientLogger(ContentUnderstandingClientBuilder.class);
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding;

import com.azure.core.util.ServiceVersion;

/**
 * Service version of ContentUnderstandingClient.
 */
public enum ContentUnderstandingServiceVersion implements ServiceVersion {
    /**
     * Service API version {@code 2025-11-01}.
     */
    V2025_11_01("2025-11-01");

    private final String version;

    ContentUnderstandingServiceVersion(String version) {
        this.version = version;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getVersion() {
        return version;
    }

    /**
     * Gets the latest service version supported by this client library.
     *
     * @return The latest {@link ContentUnderstandingServiceVersion}.
     */
    public static ContentUnderstandingServiceVersion getLatest() {
        return V2025_11_01;
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.ai.contentunderstanding.implementation;

import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;

/**
 * Helper class to access private members of ContentAnalyzerAnalyzeOperationStatus.
 * Uses the accessor pattern so implementation code can reach otherwise-inaccessible
 * state without widening the model's public API.
 */
public final class ContentAnalyzerAnalyzeOperationStatusHelper {
    // Holds the single registered accessor; expected to be registered via setAccessor
    // before setOperationId is called (a call before registration would NPE).
    private static ContentAnalyzerAnalyzeOperationStatusAccessor accessor;

    /**
     * Interface for accessing private members.
     */
    public interface ContentAnalyzerAnalyzeOperationStatusAccessor {
        void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId);
    }

    /**
     * Sets the accessor.
     *
     * @param accessorInstance the accessor instance.
     */
    public static void setAccessor(ContentAnalyzerAnalyzeOperationStatusAccessor accessorInstance) {
        accessor = accessorInstance;
    }

    /**
     * Sets the operationId on a ContentAnalyzerAnalyzeOperationStatus instance.
     *
     * @param status the status instance.
     * @param operationId the operationId to set.
     */
    public static void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId) {
        accessor.setOperationId(status, operationId);
    }

    // No instances: static utility holder only.
    private ContentAnalyzerAnalyzeOperationStatusHelper() {
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.
+ +package com.azure.ai.contentunderstanding.implementation; + +import com.azure.ai.contentunderstanding.ContentUnderstandingServiceVersion; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.core.annotation.BodyParam; +import com.azure.core.annotation.Delete; +import com.azure.core.annotation.ExpectedResponses; +import com.azure.core.annotation.Get; +import com.azure.core.annotation.HeaderParam; +import com.azure.core.annotation.Host; +import com.azure.core.annotation.HostParam; +import com.azure.core.annotation.Patch; +import com.azure.core.annotation.PathParam; +import com.azure.core.annotation.Post; +import com.azure.core.annotation.Put; +import com.azure.core.annotation.QueryParam; +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceInterface; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.annotation.UnexpectedResponseExceptionType; +import com.azure.core.exception.ClientAuthenticationException; +import com.azure.core.exception.HttpResponseException; +import com.azure.core.exception.ResourceModifiedException; +import com.azure.core.exception.ResourceNotFoundException; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.HttpPipelineBuilder; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.PagedResponse; +import com.azure.core.http.rest.PagedResponseBase; +import com.azure.core.http.rest.RequestOptions; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.RestProxy; +import com.azure.core.util.BinaryData; +import 
com.azure.core.util.Context; +import com.azure.core.util.FluxUtil; +import com.azure.core.util.polling.DefaultPollingStrategy; +import com.azure.core.util.polling.PollerFlux; +import com.azure.core.util.polling.PollingStrategyOptions; +import com.azure.core.util.polling.SyncDefaultPollingStrategy; +import com.azure.core.util.polling.SyncPoller; +import com.azure.core.util.serializer.JacksonAdapter; +import com.azure.core.util.serializer.SerializerAdapter; +import com.azure.core.util.serializer.TypeReference; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import reactor.core.publisher.Mono; + +/** + * Initializes a new instance of the ContentUnderstandingClient type. + */ +public final class ContentUnderstandingClientImpl { + /** + * The proxy service used to perform REST calls. + */ + private final ContentUnderstandingClientService service; + + /** + * Content Understanding service endpoint. + */ + private final String endpoint; + + /** + * Gets Content Understanding service endpoint. + * + * @return the endpoint value. + */ + public String getEndpoint() { + return this.endpoint; + } + + /** + * Service version. + */ + private final ContentUnderstandingServiceVersion serviceVersion; + + /** + * Gets Service version. + * + * @return the serviceVersion value. + */ + public ContentUnderstandingServiceVersion getServiceVersion() { + return this.serviceVersion; + } + + /** + * The HTTP pipeline to send requests through. + */ + private final HttpPipeline httpPipeline; + + /** + * Gets The HTTP pipeline to send requests through. + * + * @return the httpPipeline value. + */ + public HttpPipeline getHttpPipeline() { + return this.httpPipeline; + } + + /** + * The serializer to serialize an object into a string. + */ + private final SerializerAdapter serializerAdapter; + + /** + * Gets The serializer to serialize an object into a string. + * + * @return the serializerAdapter value. 
+ */ + public SerializerAdapter getSerializerAdapter() { + return this.serializerAdapter; + } + + /** + * Initializes an instance of ContentUnderstandingClient client. + * + * @param endpoint Content Understanding service endpoint. + * @param serviceVersion Service version. + */ + public ContentUnderstandingClientImpl(String endpoint, ContentUnderstandingServiceVersion serviceVersion) { + this(new HttpPipelineBuilder().policies(new UserAgentPolicy(), new RetryPolicy()).build(), + JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); + } + + /** + * Initializes an instance of ContentUnderstandingClient client. + * + * @param httpPipeline The HTTP pipeline to send requests through. + * @param endpoint Content Understanding service endpoint. + * @param serviceVersion Service version. + */ + public ContentUnderstandingClientImpl(HttpPipeline httpPipeline, String endpoint, + ContentUnderstandingServiceVersion serviceVersion) { + this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); + } + + /** + * Initializes an instance of ContentUnderstandingClient client. + * + * @param httpPipeline The HTTP pipeline to send requests through. + * @param serializerAdapter The serializer to serialize an object into a string. + * @param endpoint Content Understanding service endpoint. + * @param serviceVersion Service version. + */ + public ContentUnderstandingClientImpl(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, + String endpoint, ContentUnderstandingServiceVersion serviceVersion) { + this.httpPipeline = httpPipeline; + this.serializerAdapter = serializerAdapter; + this.endpoint = endpoint; + this.serviceVersion = serviceVersion; + this.service + = RestProxy.create(ContentUnderstandingClientService.class, this.httpPipeline, this.getSerializerAdapter()); + } + + /** + * The interface defining all the services for ContentUnderstandingClient to be used by the proxy service to perform + * REST calls. 
+ */ + @Host("{endpoint}/contentunderstanding") + @ServiceInterface(name = "ContentUnderstandingClient") + public interface ContentUnderstandingClientService { + @Post("/analyzers/{analyzerId}:analyze") + @ExpectedResponses({ 202 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> analyze(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData analyzeRequest1, RequestOptions requestOptions, Context context); + + @Post("/analyzers/{analyzerId}:analyze") + @ExpectedResponses({ 202 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response analyzeSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData analyzeRequest1, RequestOptions requestOptions, Context context); + + @Post("/analyzers/{analyzerId}:analyzeBinary") + @ExpectedResponses({ 202 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + 
@UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> analyzeBinary(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("content-type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("*/*") BinaryData binaryInput, RequestOptions requestOptions, Context context); + + @Post("/analyzers/{analyzerId}:analyzeBinary") + @ExpectedResponses({ 202 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response analyzeBinarySync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("content-type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("*/*") BinaryData binaryInput, RequestOptions requestOptions, Context context); + + @Post("/analyzers/{analyzerId}:copy") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> copyAnalyzer(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData copyAnalyzerRequest, 
RequestOptions requestOptions, + Context context); + + @Post("/analyzers/{analyzerId}:copy") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response copyAnalyzerSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData copyAnalyzerRequest, RequestOptions requestOptions, + Context context); + + @Put("/analyzers/{analyzerId}") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> createAnalyzer(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData resource, RequestOptions requestOptions, Context context); + + @Put("/analyzers/{analyzerId}") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + 
@UnexpectedResponseExceptionType(HttpResponseException.class) + Response createAnalyzerSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData resource, RequestOptions requestOptions, Context context); + + @Delete("/analyzers/{analyzerId}") + @ExpectedResponses({ 204 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> deleteAnalyzer(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + RequestOptions requestOptions, Context context); + + @Delete("/analyzers/{analyzerId}") + @ExpectedResponses({ 204 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response deleteAnalyzerSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + RequestOptions requestOptions, Context context); + + @Delete("/analyzerResults/{operationId}") + @ExpectedResponses({ 204 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = 
ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> deleteResult(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + RequestOptions requestOptions, Context context); + + @Delete("/analyzerResults/{operationId}") + @ExpectedResponses({ 204 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response deleteResultSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + RequestOptions requestOptions, Context context); + + @Get("/analyzers/{analyzerId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAnalyzer(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Accept") String accept, RequestOptions requestOptions, Context context); + + @Get("/analyzers/{analyzerId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + 
@UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAnalyzerSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Accept") String accept, RequestOptions requestOptions, Context context); + + @Get("/defaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getDefaults(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Get("/defaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getDefaultsSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Get("/analyzers/{analyzerId}/operations/{operationId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getOperationStatus(@HostParam("endpoint") String 
endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @PathParam("operationId") String operationId, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Get("/analyzers/{analyzerId}/operations/{operationId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getOperationStatusSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @PathParam("operationId") String operationId, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Get("/analyzerResults/{operationId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getResult(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + @HeaderParam("Accept") String accept, RequestOptions requestOptions, Context context); + + @Get("/analyzerResults/{operationId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + 
@UnexpectedResponseExceptionType(HttpResponseException.class) + Response getResultSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + @HeaderParam("Accept") String accept, RequestOptions requestOptions, Context context); + + @Get("/analyzerResults/{operationId}/files/{path}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getResultFile(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + @PathParam(value = "path", encoded = true) String path, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Get("/analyzerResults/{operationId}/files/{path}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getResultFileSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("operationId") String operationId, + @PathParam(value = "path", encoded = true) String path, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Post("/analyzers/{analyzerId}:grantCopyAuthorization") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + 
@UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> grantCopyAuthorization(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions, + Context context); + + @Post("/analyzers/{analyzerId}:grantCopyAuthorization") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response grantCopyAuthorizationSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/json") BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions, + Context context); + + @Get("/analyzers") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> listAnalyzers(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Accept") String accept, + 
RequestOptions requestOptions, Context context); + + @Get("/analyzers") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response listAnalyzersSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Accept") String accept, + RequestOptions requestOptions, Context context); + + @Patch("/analyzers/{analyzerId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> updateAnalyzer(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, + @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/merge-patch+json") BinaryData resource, RequestOptions requestOptions, + Context context); + + @Patch("/analyzers/{analyzerId}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response updateAnalyzerSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("analyzerId") String analyzerId, 
+ @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, + @BodyParam("application/merge-patch+json") BinaryData resource, RequestOptions requestOptions, + Context context); + + @Patch("/defaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> updateDefaults(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Content-Type") String contentType, + @HeaderParam("Accept") String accept, + @BodyParam("application/merge-patch+json") BinaryData updateDefaultsRequest, RequestOptions requestOptions, + Context context); + + @Patch("/defaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response updateDefaultsSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @HeaderParam("Content-Type") String contentType, + @HeaderParam("Accept") String accept, + @BodyParam("application/merge-patch+json") BinaryData updateDefaultsRequest, RequestOptions requestOptions, + Context context); + + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 
}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> listAnalyzersNext(@PathParam(value = "nextLink", encoded = true) String nextLink, + @HostParam("endpoint") String endpoint, @HeaderParam("Accept") String accept, RequestOptions requestOptions, + Context context); + + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) + @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) + @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response listAnalyzersNextSync(@PathParam(value = "nextLink", encoded = true) String nextLink, + @HostParam("endpoint") String endpoint, @HeaderParam("Accept") String accept, RequestOptions requestOptions, + Context context); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return provides status details for long running operations along with {@link Response} on successful completion + * of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> analyzeWithResponseAsync(String analyzerId, BinaryData analyzeRequest1, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.analyze(this.getEndpoint(), this.getServiceVersion().getVersion(), + analyzerId, contentType, accept, analyzeRequest1, requestOptions, context)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return provides status details for long running operations along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response analyzeWithResponse(String analyzerId, BinaryData analyzeRequest1, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return service.analyzeSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, contentType, + accept, analyzeRequest1, requestOptions, Context.NONE); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux + beginAnalyzeWithModelAsync(String analyzerId, BinaryData analyzeRequest1, RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.analyzeWithResponseAsync(analyzerId, analyzeRequest1, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerAnalyzeOperationStatus.class), + TypeReference.createInstance(AnalyzeResult.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeWithModel(String analyzerId, + BinaryData analyzeRequest1, RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.analyzeWithResponse(analyzerId, analyzeRequest1, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerAnalyzeOperationStatus.class), + TypeReference.createInstance(AnalyzeResult.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeAsync(String analyzerId, BinaryData analyzeRequest1, + RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.analyzeWithResponseAsync(analyzerId, analyzeRequest1, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     inputs (Optional): [
+     *          (Optional){
+     *             url: String (Optional)
+     *             data: byte[] (Optional)
+     *             name: String (Optional)
+     *             mimeType: String (Optional)
+     *             range: String (Optional)
+     *         }
+     *     ]
+     *     modelDeployments (Optional): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param analyzeRequest1 The analyzeRequest1 parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyze(String analyzerId, BinaryData analyzeRequest1, + RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.analyzeWithResponse(analyzerId, analyzeRequest1, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return provides status details for long running operations along with {@link Response} on successful completion + * of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> analyzeBinaryWithResponseAsync(String analyzerId, String contentType, + BinaryData binaryInput, RequestOptions requestOptions) { + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.analyzeBinary(this.getEndpoint(), this.getServiceVersion().getVersion(), + analyzerId, contentType, accept, binaryInput, requestOptions, context)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
+     * <tr><td>stringEncoding</td><td>String</td><td>No</td><td>The string encoding format for content spans in the
+     * response.
+     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.</td></tr>
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return provides status details for long running operations along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response analyzeBinaryWithResponse(String analyzerId, String contentType, + BinaryData binaryInput, RequestOptions requestOptions) { + final String accept = "application/json"; + return service.analyzeBinarySync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, + contentType, accept, binaryInput, requestOptions, Context.NONE); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeBinaryWithModelAsync( + String analyzerId, String contentType, BinaryData binaryInput, RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.analyzeBinaryWithResponseAsync(analyzerId, contentType, binaryInput, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerAnalyzeOperationStatus.class), + TypeReference.createInstance(AnalyzeResult.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinaryWithModel( + String analyzerId, String contentType, BinaryData binaryInput, RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.analyzeBinaryWithResponse(analyzerId, contentType, binaryInput, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerAnalyzeOperationStatus.class), + TypeReference.createInstance(AnalyzeResult.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux beginAnalyzeBinaryAsync(String analyzerId, String contentType, + BinaryData binaryInput, RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.analyzeBinaryWithResponseAsync(analyzerId, contentType, binaryInput, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Extract content and fields from input. + *

Query Parameters

+ * + * + * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
stringEncodingStringNo The string encoding format for content spans in the + * response. + * Possible values are 'codePoint', 'utf16', and `utf8`. Default is `codePoint`.")
processingLocationStringNoThe location where the data may be processed. + * Defaults to global. Allowed values: "geography", "dataZone", "global".
rangeStringNoRange of the input to analyze (ex. `1-3,5,9-`). Document content + * uses 1-based page numbers, while audio visual content uses integer milliseconds.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * BinaryData
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param contentType Request content type. + * @param binaryInput The binary content of the document to analyze. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of provides status details for long running operations. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller beginAnalyzeBinary(String analyzerId, String contentType, + BinaryData binaryInput, RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.analyzeBinaryWithResponse(analyzerId, contentType, binaryInput, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response} on + * successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> copyAnalyzerWithResponseAsync(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.copyAnalyzer(this.getEndpoint(), this.getServiceVersion().getVersion(), + analyzerId, contentType, accept, copyAnalyzerRequest, requestOptions, context)); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response copyAnalyzerWithResponse(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return service.copyAnalyzerSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, + contentType, accept, copyAnalyzerRequest, requestOptions, Context.NONE); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux<ContentAnalyzerOperationStatus, ContentAnalyzer> beginCopyAnalyzerWithModelAsync( + String analyzerId, BinaryData copyAnalyzerRequest, RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.copyAnalyzerWithResponseAsync(analyzerId, copyAnalyzerRequest, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerOperationStatus.class), + TypeReference.createInstance(ContentAnalyzer.class)); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller<ContentAnalyzerOperationStatus, ContentAnalyzer> beginCopyAnalyzerWithModel(String analyzerId, + BinaryData copyAnalyzerRequest, RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.copyAnalyzerWithResponse(analyzerId, copyAnalyzerRequest, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(ContentAnalyzerOperationStatus.class), + TypeReference.createInstance(ContentAnalyzer.class)); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux<BinaryData, BinaryData> beginCopyAnalyzerAsync(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.copyAnalyzerWithResponseAsync(analyzerId, copyAnalyzerRequest, requestOptions), + new com.azure.ai.contentunderstanding.implementation.OperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Create a copy of the source analyzer to the current location. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     sourceAzureResourceId: String (Optional)
+     *     sourceRegion: String (Optional)
+     *     sourceAnalyzerId: String (Required)
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param copyAnalyzerRequest The copyAnalyzerRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller<BinaryData, BinaryData> beginCopyAnalyzer(String analyzerId, BinaryData copyAnalyzerRequest, + RequestOptions requestOptions) { + return SyncPoller.createPoller(Duration.ofSeconds(1), + () -> this.copyAnalyzerWithResponse(analyzerId, copyAnalyzerRequest, requestOptions), + new com.azure.ai.contentunderstanding.implementation.SyncOperationLocationPollingStrategy<>( + new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion()), + "result"), + TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); + } + + /** + * Create a new analyzer asynchronously. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response} on + * successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> createAnalyzerWithResponseAsync(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return FluxUtil.withContext(context -> service.createAnalyzer(this.getEndpoint(), + this.getServiceVersion().getVersion(), analyzerId, contentType, accept, resource, requestOptions, context)); + } + + /** + * Create a new analyzer asynchronously. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response createAnalyzerWithResponse(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + final String contentType = "application/json"; + final String accept = "application/json"; + return service.createAnalyzerSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, + contentType, accept, resource, requestOptions, Context.NONE); + } + + /** + * Create a new analyzer asynchronously. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal + * documents. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public PollerFlux + beginCreateAnalyzerWithModelAsync(String analyzerId, BinaryData resource, RequestOptions requestOptions) { + return PollerFlux.create(Duration.ofSeconds(1), + () -> this.createAnalyzerWithResponseAsync(analyzerId, resource, requestOptions), + new DefaultPollingStrategy<>(new PollingStrategyOptions(this.getHttpPipeline()) + .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint())) + .setContext(requestOptions != null && requestOptions.getContext() != null + ? requestOptions.getContext() + : Context.NONE) + .setServiceVersion(this.getServiceVersion().getVersion())), + TypeReference.createInstance(ContentAnalyzerOperationStatus.class), + TypeReference.createInstance(ContentAnalyzer.class)); + } + + /** + * Create a new analyzer asynchronously. + *

Query Parameters

+ * + * + * + * + *
Query Parameters
NameTypeRequiredDescription
allowReplaceBooleanNoAllow the operation to replace an existing + * resource.
+ * You can add these to a request with {@link RequestOptions#addQueryParam} + *

Request Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param resource The resource instance.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal
+     * documents.
+     */
+    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
+    public SyncPoller<ContentAnalyzerOperationStatus, ContentAnalyzer> beginCreateAnalyzerWithModel(String analyzerId,
+        BinaryData resource, RequestOptions requestOptions) {
+        return SyncPoller.createPoller(Duration.ofSeconds(1),
+            () -> this.createAnalyzerWithResponse(analyzerId, resource, requestOptions),
+            new SyncDefaultPollingStrategy<>(new PollingStrategyOptions(this.getHttpPipeline())
+                .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint()))
+                .setContext(requestOptions != null && requestOptions.getContext() != null
+                    ? requestOptions.getContext()
+                    : Context.NONE)
+                .setServiceVersion(this.getServiceVersion().getVersion())),
+            TypeReference.createInstance(ContentAnalyzerOperationStatus.class),
+            TypeReference.createInstance(ContentAnalyzer.class));
+    }
+
+    /**
+     * Create a new analyzer asynchronously.
+     * <p><strong>Query Parameters</strong></p>
+     * <table border="1">
+     * <caption>Query Parameters</caption>
+     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
+     * <tr><td>allowReplace</td><td>Boolean</td><td>No</td><td>Allow the operation to replace an existing
+     * resource.</td></tr>
+     * </table>
+     * You can add these to a request with {@link RequestOptions#addQueryParam}
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param resource The resource instance.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link PollerFlux} for polling of analyzer that extracts content and fields from multimodal
+     * documents.
+     */
+    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
+    public PollerFlux<BinaryData, BinaryData> beginCreateAnalyzerAsync(String analyzerId, BinaryData resource,
+        RequestOptions requestOptions) {
+        return PollerFlux.create(Duration.ofSeconds(1),
+            () -> this.createAnalyzerWithResponseAsync(analyzerId, resource, requestOptions),
+            new DefaultPollingStrategy<>(new PollingStrategyOptions(this.getHttpPipeline())
+                .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint()))
+                .setContext(requestOptions != null && requestOptions.getContext() != null
+                    ? requestOptions.getContext()
+                    : Context.NONE)
+                .setServiceVersion(this.getServiceVersion().getVersion())),
+            TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class));
+    }
+
+    /**
+     * Create a new analyzer asynchronously.
+     * <p><strong>Query Parameters</strong></p>
+     * <table border="1">
+     * <caption>Query Parameters</caption>
+     * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
+     * <tr><td>allowReplace</td><td>Boolean</td><td>No</td><td>Allow the operation to replace an existing
+     * resource.</td></tr>
+     * </table>
+     * You can add these to a request with {@link RequestOptions#addQueryParam}
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param resource The resource instance.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link SyncPoller} for polling of analyzer that extracts content and fields from multimodal
+     * documents.
+     */
+    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
+    public SyncPoller<BinaryData, BinaryData> beginCreateAnalyzer(String analyzerId, BinaryData resource,
+        RequestOptions requestOptions) {
+        return SyncPoller.createPoller(Duration.ofSeconds(1),
+            () -> this.createAnalyzerWithResponse(analyzerId, resource, requestOptions),
+            new SyncDefaultPollingStrategy<>(new PollingStrategyOptions(this.getHttpPipeline())
+                .setEndpoint("{endpoint}/contentunderstanding".replace("{endpoint}", this.getEndpoint()))
+                .setContext(requestOptions != null && requestOptions.getContext() != null
+                    ? requestOptions.getContext()
+                    : Context.NONE)
+                .setServiceVersion(this.getServiceVersion().getVersion())),
+            TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class));
+    }
+
+    /**
+     * Delete analyzer.
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link Response} on successful completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<Void>> deleteAnalyzerWithResponseAsync(String analyzerId, RequestOptions requestOptions) {
+        return FluxUtil.withContext(context -> service.deleteAnalyzer(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), analyzerId, requestOptions, context));
+    }
+
+    /**
+     * Delete analyzer.
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<Void> deleteAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) {
+        return service.deleteAnalyzerSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId,
+            requestOptions, Context.NONE);
+    }
+
+    /**
+     * Mark the result of an analysis operation for deletion.
+     * 
+     * @param operationId Operation identifier.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link Response} on successful completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<Void>> deleteResultWithResponseAsync(String operationId, RequestOptions requestOptions) {
+        return FluxUtil.withContext(context -> service.deleteResult(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), operationId, requestOptions, context));
+    }
+
+    /**
+     * Mark the result of an analysis operation for deletion.
+     * 
+     * @param operationId Operation identifier.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<Void> deleteResultWithResponse(String operationId, RequestOptions requestOptions) {
+        return service.deleteResultSync(this.getEndpoint(), this.getServiceVersion().getVersion(), operationId,
+            requestOptions, Context.NONE);
+    }
+
+    /**
+     * Get analyzer properties.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer properties along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAnalyzerWithResponseAsync(String analyzerId, RequestOptions requestOptions) { + final String accept = "application/json"; + return FluxUtil.withContext(context -> service.getAnalyzer(this.getEndpoint(), + this.getServiceVersion().getVersion(), analyzerId, accept, requestOptions, context)); + } + + /** + * Get analyzer properties. + *
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return analyzer properties along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> getAnalyzerWithResponse(String analyzerId, RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return service.getAnalyzerSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, accept,
+            requestOptions, Context.NONE);
+    }
+
+    /**
+     * Return default settings for this Content Understanding resource.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return default settings for this Content Understanding resource along with {@link Response} on successful
+     * completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> getDefaultsWithResponseAsync(RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return FluxUtil.withContext(context -> service.getDefaults(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), accept, requestOptions, context));
+    }
+
+    /**
+     * Return default settings for this Content Understanding resource.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return default settings for this Content Understanding resource along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> getDefaultsWithResponse(RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return service.getDefaultsSync(this.getEndpoint(), this.getServiceVersion().getVersion(), accept,
+            requestOptions, Context.NONE);
+    }
+
+    /**
+     * Get the status of an analyzer creation operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Required)
+     *         description: String (Optional)
+     *         tags (Optional): {
+     *             String: String (Required)
+     *         }
+     *         status: String(creating/ready/deleting/failed) (Required)
+     *         createdAt: OffsetDateTime (Required)
+     *         lastModifiedAt: OffsetDateTime (Required)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         baseAnalyzerId: String (Optional)
+     *         config (Optional): {
+     *             returnDetails: Boolean (Optional)
+     *             locales (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             enableOcr: Boolean (Optional)
+     *             enableLayout: Boolean (Optional)
+     *             enableFigureDescription: Boolean (Optional)
+     *             enableFigureAnalysis: Boolean (Optional)
+     *             enableFormula: Boolean (Optional)
+     *             tableFormat: String(html/markdown) (Optional)
+     *             chartFormat: String(chartJs/markdown) (Optional)
+     *             annotationFormat: String(none/markdown) (Optional)
+     *             disableFaceBlurring: Boolean (Optional)
+     *             estimateFieldSourceAndConfidence: Boolean (Optional)
+     *             contentCategories (Optional): {
+     *                 String (Required): {
+     *                     description: String (Optional)
+     *                     analyzerId: String (Optional)
+     *                     analyzer (Optional): (recursive schema, see analyzer above)
+     *                 }
+     *             }
+     *             enableSegment: Boolean (Optional)
+     *             segmentPerPage: Boolean (Optional)
+     *             omitContent: Boolean (Optional)
+     *         }
+     *         fieldSchema (Optional): {
+     *             name: String (Optional)
+     *             description: String (Optional)
+     *             fields (Optional, Required on create): {
+     *                 String (Required): {
+     *                     method: String(generate/extract/classify) (Optional)
+     *                     type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                     description: String (Optional)
+     *                     items (Optional): (recursive schema, see items above)
+     *                     properties (Optional): {
+     *                         String (Required): (recursive schema, see String above)
+     *                     }
+     *                     examples (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enum (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enumDescriptions (Optional): {
+     *                         String: String (Required)
+     *                     }
+     *                     $ref: String (Optional)
+     *                     estimateSourceAndConfidence: Boolean (Optional)
+     *                 }
+     *             }
+     *             definitions (Optional): {
+     *                 String (Required): (recursive schema, see String above)
+     *             }
+     *         }
+     *         dynamicFieldSchema: Boolean (Optional)
+     *         processingLocation: String(geography/dataZone/global) (Optional)
+     *         knowledgeSources (Optional): [
+     *              (Optional){
+     *                 kind: String(labeledData) (Required)
+     *             }
+     *         ]
+     *         models (Optional): {
+     *             String: String (Required)
+     *         }
+     *         supportedModels (Optional): {
+     *             completion (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             embedding (Optional): [
+     *                 String (Optional)
+     *             ]
+     *         }
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param operationId The unique ID of the operation.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the status of an analyzer creation operation along with {@link Response} on successful completion of
+     * {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> getOperationStatusWithResponseAsync(String analyzerId, String operationId,
+        RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return FluxUtil.withContext(context -> service.getOperationStatus(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), analyzerId, operationId, accept, requestOptions, context));
+    }
+
+    /**
+     * Get the status of an analyzer creation operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Required)
+     *         description: String (Optional)
+     *         tags (Optional): {
+     *             String: String (Required)
+     *         }
+     *         status: String(creating/ready/deleting/failed) (Required)
+     *         createdAt: OffsetDateTime (Required)
+     *         lastModifiedAt: OffsetDateTime (Required)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         baseAnalyzerId: String (Optional)
+     *         config (Optional): {
+     *             returnDetails: Boolean (Optional)
+     *             locales (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             enableOcr: Boolean (Optional)
+     *             enableLayout: Boolean (Optional)
+     *             enableFigureDescription: Boolean (Optional)
+     *             enableFigureAnalysis: Boolean (Optional)
+     *             enableFormula: Boolean (Optional)
+     *             tableFormat: String(html/markdown) (Optional)
+     *             chartFormat: String(chartJs/markdown) (Optional)
+     *             annotationFormat: String(none/markdown) (Optional)
+     *             disableFaceBlurring: Boolean (Optional)
+     *             estimateFieldSourceAndConfidence: Boolean (Optional)
+     *             contentCategories (Optional): {
+     *                 String (Required): {
+     *                     description: String (Optional)
+     *                     analyzerId: String (Optional)
+     *                     analyzer (Optional): (recursive schema, see analyzer above)
+     *                 }
+     *             }
+     *             enableSegment: Boolean (Optional)
+     *             segmentPerPage: Boolean (Optional)
+     *             omitContent: Boolean (Optional)
+     *         }
+     *         fieldSchema (Optional): {
+     *             name: String (Optional)
+     *             description: String (Optional)
+     *             fields (Optional, Required on create): {
+     *                 String (Required): {
+     *                     method: String(generate/extract/classify) (Optional)
+     *                     type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                     description: String (Optional)
+     *                     items (Optional): (recursive schema, see items above)
+     *                     properties (Optional): {
+     *                         String (Required): (recursive schema, see String above)
+     *                     }
+     *                     examples (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enum (Optional): [
+     *                         String (Optional)
+     *                     ]
+     *                     enumDescriptions (Optional): {
+     *                         String: String (Required)
+     *                     }
+     *                     $ref: String (Optional)
+     *                     estimateSourceAndConfidence: Boolean (Optional)
+     *                 }
+     *             }
+     *             definitions (Optional): {
+     *                 String (Required): (recursive schema, see String above)
+     *             }
+     *         }
+     *         dynamicFieldSchema: Boolean (Optional)
+     *         processingLocation: String(geography/dataZone/global) (Optional)
+     *         knowledgeSources (Optional): [
+     *              (Optional){
+     *                 kind: String(labeledData) (Required)
+     *             }
+     *         ]
+     *         models (Optional): {
+     *             String: String (Required)
+     *         }
+     *         supportedModels (Optional): {
+     *             completion (Optional): [
+     *                 String (Optional)
+     *             ]
+     *             embedding (Optional): [
+     *                 String (Optional)
+     *             ]
+     *         }
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param operationId The unique ID of the operation.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the status of an analyzer creation operation along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> getOperationStatusWithResponse(String analyzerId, String operationId,
+        RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return service.getOperationStatusSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId,
+            operationId, accept, requestOptions, Context.NONE);
+    }
+
+    /**
+     * Get the result of an analysis operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param operationId The unique ID of the operation.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the result of an analysis operation along with {@link Response} on successful completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> getResultWithResponseAsync(String operationId, RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return FluxUtil.withContext(context -> service.getResult(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), operationId, accept, requestOptions, context));
+    }
+
+    /**
+     * Get the result of an analysis operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     id: String (Required)
+     *     status: String(NotStarted/Running/Succeeded/Failed/Canceled) (Required)
+     *     error (Optional): {
+     *         code: String (Required)
+     *         message: String (Required)
+     *         target: String (Optional)
+     *         details (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         innererror (Optional): {
+     *             code: String (Optional)
+     *             innererror (Optional): (recursive schema, see innererror above)
+     *         }
+     *     }
+     *     result (Optional): {
+     *         analyzerId: String (Optional)
+     *         apiVersion: String (Optional)
+     *         createdAt: OffsetDateTime (Optional)
+     *         warnings (Optional): [
+     *             (recursive schema, see above)
+     *         ]
+     *         stringEncoding: String (Optional)
+     *         contents (Required): [
+     *              (Required){
+     *                 kind: String(document/audioVisual) (Required)
+     *                 mimeType: String (Required)
+     *                 analyzerId: String (Optional)
+     *                 category: String (Optional)
+     *                 path: String (Optional)
+     *                 markdown: String (Optional)
+     *                 fields (Optional): {
+     *                     String (Required): {
+     *                         type: String(string/date/time/number/integer/boolean/array/object/json) (Required)
+     *                         spans (Optional): [
+     *                              (Optional){
+     *                                 offset: int (Required)
+     *                                 length: int (Required)
+     *                             }
+     *                         ]
+     *                         confidence: Double (Optional)
+     *                         source: String (Optional)
+     *                     }
+     *                 }
+     *             }
+     *         ]
+     *     }
+     *     usage (Optional): {
+     *         documentPagesMinimal: Integer (Optional)
+     *         documentPagesBasic: Integer (Optional)
+     *         documentPagesStandard: Integer (Optional)
+     *         audioHours: Double (Optional)
+     *         videoHours: Double (Optional)
+     *         contextualizationTokens: Integer (Optional)
+     *         tokens (Optional): {
+     *             String: int (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param operationId The unique ID of the operation.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return the result of an analysis operation along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> getResultWithResponse(String operationId, RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return service.getResultSync(this.getEndpoint(), this.getServiceVersion().getVersion(), operationId, accept,
+            requestOptions, Context.NONE);
+    }
+
+    /**
+     * Get a file associated with the result of an analysis operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * BinaryData
+     * }
+     * </pre>
+     * 
+     * @param operationId Operation identifier.
+     * @param path File path.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return a file associated with the result of an analysis operation along with {@link Response} on successful
+     * completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> getResultFileWithResponseAsync(String operationId, String path,
+        RequestOptions requestOptions) {
+        final String accept = "*/*";
+        return FluxUtil.withContext(context -> service.getResultFile(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), operationId, path, accept, requestOptions, context));
+    }
+
+    /**
+     * Get a file associated with the result of an analysis operation.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * BinaryData
+     * }
+     * </pre>
+     * 
+     * @param operationId Operation identifier.
+     * @param path File path.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return a file associated with the result of an analysis operation along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> getResultFileWithResponse(String operationId, String path,
+        RequestOptions requestOptions) {
+        final String accept = "*/*";
+        return service.getResultFileSync(this.getEndpoint(), this.getServiceVersion().getVersion(), operationId, path,
+            accept, requestOptions, Context.NONE);
+    }
+
+    /**
+     * Get authorization for copying this analyzer to another location.
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     targetAzureResourceId: String (Required)
+     *     targetRegion: String (Optional)
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     source: String (Required)
+     *     targetAzureResourceId: String (Required)
+     *     expiresAt: OffsetDateTime (Required)
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param grantCopyAuthorizationRequest1 The grantCopyAuthorizationRequest1 parameter.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return authorization for copying this analyzer to another location along with {@link Response} on successful
+     * completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> grantCopyAuthorizationWithResponseAsync(String analyzerId,
+        BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions) {
+        final String contentType = "application/json";
+        final String accept = "application/json";
+        return FluxUtil.withContext(
+            context -> service.grantCopyAuthorization(this.getEndpoint(), this.getServiceVersion().getVersion(),
+                analyzerId, contentType, accept, grantCopyAuthorizationRequest1, requestOptions, context));
+    }
+
+    /**
+     * Get authorization for copying this analyzer to another location.
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     targetAzureResourceId: String (Required)
+     *     targetRegion: String (Optional)
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     source: String (Required)
+     *     targetAzureResourceId: String (Required)
+     *     expiresAt: OffsetDateTime (Required)
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param grantCopyAuthorizationRequest1 The grantCopyAuthorizationRequest1 parameter.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return authorization for copying this analyzer to another location along with {@link Response}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Response<BinaryData> grantCopyAuthorizationWithResponse(String analyzerId,
+        BinaryData grantCopyAuthorizationRequest1, RequestOptions requestOptions) {
+        final String contentType = "application/json";
+        final String accept = "application/json";
+        return service.grantCopyAuthorizationSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId,
+            contentType, accept, grantCopyAuthorizationRequest1, requestOptions, Context.NONE);
+    }
+
+    /**
+     * List analyzers.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return paged collection of ContentAnalyzer items along with {@link PagedResponse} on successful completion of
+     * {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    private Mono<PagedResponse<BinaryData>> listAnalyzersSinglePageAsync(RequestOptions requestOptions) {
+        final String accept = "application/json";
+        return FluxUtil
+            .withContext(context -> service.listAnalyzers(this.getEndpoint(), this.getServiceVersion().getVersion(),
+                accept, requestOptions, context))
+            .map(res -> new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(),
+                getValues(res.getValue(), "value"), getNextLink(res.getValue(), "nextLink"), null));
+    }
+
+    /**
+     * List analyzers.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedFlux}.
+     */
+    @ServiceMethod(returns = ReturnType.COLLECTION)
+    public PagedFlux<BinaryData> listAnalyzersAsync(RequestOptions requestOptions) {
+        RequestOptions requestOptionsForNextPage = new RequestOptions();
+        requestOptionsForNextPage.setContext(
+            requestOptions != null && requestOptions.getContext() != null ? requestOptions.getContext() : Context.NONE);
+        return new PagedFlux<>(() -> listAnalyzersSinglePageAsync(requestOptions),
+            nextLink -> listAnalyzersNextSinglePageAsync(nextLink, requestOptionsForNextPage));
+    }
+
+    /**
+     * List analyzers.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return paged collection of ContentAnalyzer items along with {@link PagedResponse}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    private PagedResponse<BinaryData> listAnalyzersSinglePage(RequestOptions requestOptions) {
+        final String accept = "application/json";
+        Response<BinaryData> res = service.listAnalyzersSync(this.getEndpoint(), this.getServiceVersion().getVersion(),
+            accept, requestOptions, Context.NONE);
+        return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(),
+            getValues(res.getValue(), "value"), getNextLink(res.getValue(), "nextLink"), null);
+    }
+
+    /**
+     * List analyzers.
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return paged collection of ContentAnalyzer items as paginated response with {@link PagedIterable}.
+     */
+    @ServiceMethod(returns = ReturnType.COLLECTION)
+    public PagedIterable<BinaryData> listAnalyzers(RequestOptions requestOptions) {
+        RequestOptions requestOptionsForNextPage = new RequestOptions();
+        requestOptionsForNextPage.setContext(
+            requestOptions != null && requestOptions.getContext() != null ? requestOptions.getContext() : Context.NONE);
+        return new PagedIterable<>(() -> listAnalyzersSinglePage(requestOptions),
+            nextLink -> listAnalyzersNextSinglePage(nextLink, requestOptionsForNextPage));
+    }
+
+    /**
+     * Update analyzer properties.
+     * <p><strong>Request Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * <p><strong>Response Body Schema</strong></p>
+     * 
+     * <pre>
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * </pre>
+     * 
+     * @param analyzerId The unique identifier of the analyzer.
+     * @param resource The resource instance.
+     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
+     * @throws HttpResponseException thrown if the request is rejected by server.
+     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
+     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
+     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
+     * @return analyzer that extracts content and fields from multimodal documents along with {@link Response} on
+     * successful completion of {@link Mono}.
+     */
+    @ServiceMethod(returns = ReturnType.SINGLE)
+    public Mono<Response<BinaryData>> updateAnalyzerWithResponseAsync(String analyzerId, BinaryData resource,
+        RequestOptions requestOptions) {
+        final String contentType = "application/merge-patch+json";
+        final String accept = "application/json";
+        return FluxUtil.withContext(context -> service.updateAnalyzer(this.getEndpoint(),
+            this.getServiceVersion().getVersion(), analyzerId, contentType, accept, resource, requestOptions, context));
+    }
+
+    /**
+     * Update analyzer properties.

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param analyzerId The unique identifier of the analyzer. + * @param resource The resource instance. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return analyzer that extracts content and fields from multimodal documents along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response updateAnalyzerWithResponse(String analyzerId, BinaryData resource, + RequestOptions requestOptions) { + final String contentType = "application/merge-patch+json"; + final String accept = "application/json"; + return service.updateAnalyzerSync(this.getEndpoint(), this.getServiceVersion().getVersion(), analyzerId, + contentType, accept, resource, requestOptions, Context.NONE); + } + + /** + * Return default settings for this Content Understanding resource. + *

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Optional): {
+     *          (Optional): {
+     *             String: String (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + * @param updateDefaultsRequest The updateDefaultsRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return default settings for this Content Understanding resource along with {@link Response} on successful + * completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> updateDefaultsWithResponseAsync(BinaryData updateDefaultsRequest, + RequestOptions requestOptions) { + final String contentType = "application/merge-patch+json"; + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.updateDefaults(this.getEndpoint(), this.getServiceVersion().getVersion(), + contentType, accept, updateDefaultsRequest, requestOptions, context)); + } + + /** + * Return default settings for this Content Understanding resource. + *

+     * <p><strong>Request Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Optional): {
+     *          (Optional): {
+     *             String: String (Required)
+     *         }
+     *     }
+     * }
+     * }
+     * 
+ * + *

Response Body Schema

+ * + *
+     * {@code
+     * {
+     *     modelDeployments (Required): {
+     *         String: String (Required)
+     *     }
+     * }
+     * }
+     * 
+ * + * @param updateDefaultsRequest The updateDefaultsRequest parameter. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return default settings for this Content Understanding resource along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response updateDefaultsWithResponse(BinaryData updateDefaultsRequest, + RequestOptions requestOptions) { + final String contentType = "application/merge-patch+json"; + final String accept = "application/json"; + return service.updateDefaultsSync(this.getEndpoint(), this.getServiceVersion().getVersion(), contentType, + accept, updateDefaultsRequest, requestOptions, Context.NONE); + } + + /** + * Get the next page of items. + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param nextLink The URL to get the next list of items. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return paged collection of ContentAnalyzer items along with {@link PagedResponse} on successful completion of + * {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> listAnalyzersNextSinglePageAsync(String nextLink, + RequestOptions requestOptions) { + final String accept = "application/json"; + return FluxUtil + .withContext( + context -> service.listAnalyzersNext(nextLink, this.getEndpoint(), accept, requestOptions, context)) + .map(res -> new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), + getValues(res.getValue(), "value"), getNextLink(res.getValue(), "nextLink"), null)); + } + + /** + * Get the next page of items. + *

+     * <p><strong>Response Body Schema</strong></p>

+ * + *
+     * {@code
+     * {
+     *     analyzerId: String (Required)
+     *     description: String (Optional)
+     *     tags (Optional): {
+     *         String: String (Required)
+     *     }
+     *     status: String(creating/ready/deleting/failed) (Required)
+     *     createdAt: OffsetDateTime (Required)
+     *     lastModifiedAt: OffsetDateTime (Required)
+     *     warnings (Optional): [
+     *          (Optional){
+     *             code: String (Required)
+     *             message: String (Required)
+     *             target: String (Optional)
+     *             details (Optional): [
+     *                 (recursive schema, see above)
+     *             ]
+     *             innererror (Optional): {
+     *                 code: String (Optional)
+     *                 innererror (Optional): (recursive schema, see innererror above)
+     *             }
+     *         }
+     *     ]
+     *     baseAnalyzerId: String (Optional)
+     *     config (Optional): {
+     *         returnDetails: Boolean (Optional)
+     *         locales (Optional): [
+     *             String (Optional)
+     *         ]
+     *         enableOcr: Boolean (Optional)
+     *         enableLayout: Boolean (Optional)
+     *         enableFigureDescription: Boolean (Optional)
+     *         enableFigureAnalysis: Boolean (Optional)
+     *         enableFormula: Boolean (Optional)
+     *         tableFormat: String(html/markdown) (Optional)
+     *         chartFormat: String(chartJs/markdown) (Optional)
+     *         annotationFormat: String(none/markdown) (Optional)
+     *         disableFaceBlurring: Boolean (Optional)
+     *         estimateFieldSourceAndConfidence: Boolean (Optional)
+     *         contentCategories (Optional): {
+     *             String (Required): {
+     *                 description: String (Optional)
+     *                 analyzerId: String (Optional)
+     *                 analyzer (Optional): (recursive schema, see analyzer above)
+     *             }
+     *         }
+     *         enableSegment: Boolean (Optional)
+     *         segmentPerPage: Boolean (Optional)
+     *         omitContent: Boolean (Optional)
+     *     }
+     *     fieldSchema (Optional): {
+     *         name: String (Optional)
+     *         description: String (Optional)
+     *         fields (Optional, Required on create): {
+     *             String (Required): {
+     *                 method: String(generate/extract/classify) (Optional)
+     *                 type: String(string/date/time/number/integer/boolean/array/object/json) (Optional)
+     *                 description: String (Optional)
+     *                 items (Optional): (recursive schema, see items above)
+     *                 properties (Optional): {
+     *                     String (Required): (recursive schema, see String above)
+     *                 }
+     *                 examples (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enum (Optional): [
+     *                     String (Optional)
+     *                 ]
+     *                 enumDescriptions (Optional): {
+     *                     String: String (Required)
+     *                 }
+     *                 $ref: String (Optional)
+     *                 estimateSourceAndConfidence: Boolean (Optional)
+     *             }
+     *         }
+     *         definitions (Optional): {
+     *             String (Required): (recursive schema, see String above)
+     *         }
+     *     }
+     *     dynamicFieldSchema: Boolean (Optional)
+     *     processingLocation: String(geography/dataZone/global) (Optional)
+     *     knowledgeSources (Optional): [
+     *          (Optional){
+     *             kind: String(labeledData) (Required)
+     *         }
+     *     ]
+     *     models (Optional): {
+     *         String: String (Required)
+     *     }
+     *     supportedModels (Optional): {
+     *         completion (Optional): [
+     *             String (Optional)
+     *         ]
+     *         embedding (Optional): [
+     *             String (Optional)
+     *         ]
+     *     }
+     * }
+     * }
+     * 
+ * + * @param nextLink The URL to get the next list of items. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return paged collection of ContentAnalyzer items along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listAnalyzersNextSinglePage(String nextLink, RequestOptions requestOptions) { + final String accept = "application/json"; + Response res + = service.listAnalyzersNextSync(nextLink, this.getEndpoint(), accept, requestOptions, Context.NONE); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), + getValues(res.getValue(), "value"), getNextLink(res.getValue(), "nextLink"), null); + } + + private List getValues(BinaryData binaryData, String path) { + try { + Map obj = binaryData.toObject(Map.class); + List values = (List) obj.get(path); + return values.stream().map(BinaryData::fromObject).collect(Collectors.toList()); + } catch (RuntimeException e) { + return null; + } + } + + private String getNextLink(BinaryData binaryData, String path) { + try { + Map obj = binaryData.toObject(Map.class); + return (String) obj.get(path); + } catch (RuntimeException e) { + return null; + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/JsonMergePatchHelper.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/JsonMergePatchHelper.java new file mode 100644 index 000000000000..88e775e92486 --- 
/dev/null
+++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/JsonMergePatchHelper.java
@@ -0,0 +1,117 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.implementation;

import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentCategoryDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldSchema;
import com.azure.ai.contentunderstanding.models.KnowledgeSource;

/**
 * This is the Helper class to enable json merge patch serialization for a model.
 * <p>
 * Each accessor pair lets implementation code flip a model instance into "merge-patch mode"
 * (so its serializer emits only explicitly-set fields) and query that flag, without exposing
 * those hooks on the public model API. Accessors are presumably registered by the model
 * classes themselves — NOTE(review): registration site not visible here; confirm models
 * install their accessor before first use.
 */
public class JsonMergePatchHelper {
    // Accessor registry for ContentAnalyzer.
    private static ContentAnalyzerAccessor contentAnalyzerAccessor;

    public interface ContentAnalyzerAccessor {
        ContentAnalyzer prepareModelForJsonMergePatch(ContentAnalyzer contentAnalyzer, boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(ContentAnalyzer contentAnalyzer);
    }

    public static void setContentAnalyzerAccessor(ContentAnalyzerAccessor accessor) {
        contentAnalyzerAccessor = accessor;
    }

    public static ContentAnalyzerAccessor getContentAnalyzerAccessor() {
        return contentAnalyzerAccessor;
    }

    // Accessor registry for ContentAnalyzerConfig.
    private static ContentAnalyzerConfigAccessor contentAnalyzerConfigAccessor;

    public interface ContentAnalyzerConfigAccessor {
        ContentAnalyzerConfig prepareModelForJsonMergePatch(ContentAnalyzerConfig contentAnalyzerConfig,
            boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(ContentAnalyzerConfig contentAnalyzerConfig);
    }

    public static void setContentAnalyzerConfigAccessor(ContentAnalyzerConfigAccessor accessor) {
        contentAnalyzerConfigAccessor = accessor;
    }

    public static ContentAnalyzerConfigAccessor getContentAnalyzerConfigAccessor() {
        return contentAnalyzerConfigAccessor;
    }

    // Accessor registry for ContentCategoryDefinition.
    private static ContentCategoryDefinitionAccessor contentCategoryDefinitionAccessor;

    public interface ContentCategoryDefinitionAccessor {
        ContentCategoryDefinition prepareModelForJsonMergePatch(ContentCategoryDefinition contentCategoryDefinition,
            boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(ContentCategoryDefinition contentCategoryDefinition);
    }

    public static void setContentCategoryDefinitionAccessor(ContentCategoryDefinitionAccessor accessor) {
        contentCategoryDefinitionAccessor = accessor;
    }

    public static ContentCategoryDefinitionAccessor getContentCategoryDefinitionAccessor() {
        return contentCategoryDefinitionAccessor;
    }

    // Accessor registry for ContentFieldSchema.
    private static ContentFieldSchemaAccessor contentFieldSchemaAccessor;

    public interface ContentFieldSchemaAccessor {
        ContentFieldSchema prepareModelForJsonMergePatch(ContentFieldSchema contentFieldSchema,
            boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(ContentFieldSchema contentFieldSchema);
    }

    public static void setContentFieldSchemaAccessor(ContentFieldSchemaAccessor accessor) {
        contentFieldSchemaAccessor = accessor;
    }

    public static ContentFieldSchemaAccessor getContentFieldSchemaAccessor() {
        return contentFieldSchemaAccessor;
    }

    // Accessor registry for ContentFieldDefinition.
    private static ContentFieldDefinitionAccessor contentFieldDefinitionAccessor;

    public interface ContentFieldDefinitionAccessor {
        ContentFieldDefinition prepareModelForJsonMergePatch(ContentFieldDefinition contentFieldDefinition,
            boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(ContentFieldDefinition contentFieldDefinition);
    }

    public static void setContentFieldDefinitionAccessor(ContentFieldDefinitionAccessor accessor) {
        contentFieldDefinitionAccessor = accessor;
    }

    public static ContentFieldDefinitionAccessor getContentFieldDefinitionAccessor() {
        return contentFieldDefinitionAccessor;
    }

    // Accessor registry for KnowledgeSource.
    private static KnowledgeSourceAccessor knowledgeSourceAccessor;

    public interface KnowledgeSourceAccessor {
        KnowledgeSource prepareModelForJsonMergePatch(KnowledgeSource knowledgeSource, boolean jsonMergePatchEnabled);

        boolean isJsonMergePatch(KnowledgeSource knowledgeSource);
    }

    public static void setKnowledgeSourceAccessor(KnowledgeSourceAccessor accessor) {
        knowledgeSourceAccessor = accessor;
    }

    public static KnowledgeSourceAccessor getKnowledgeSourceAccessor() {
        return knowledgeSourceAccessor;
    }
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/OperationLocationPollingStrategy.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/OperationLocationPollingStrategy.java
new file mode 100644
index 000000000000..00255eddaeb6
--- /dev/null
+++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/OperationLocationPollingStrategy.java
@@ -0,0 +1,157 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.
package com.azure.ai.contentunderstanding.implementation;

import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.core.exception.AzureException;
import com.azure.core.http.HttpHeader;
import com.azure.core.http.rest.Response;
import com.azure.core.util.BinaryData;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.OperationResourcePollingStrategy;
import com.azure.core.util.polling.PollResponse;
import com.azure.core.util.polling.PollingContext;
import com.azure.core.util.polling.PollingStrategyOptions;
import com.azure.core.util.serializer.JsonSerializerProviders;
import com.azure.core.util.serializer.ObjectSerializer;
import com.azure.core.util.serializer.TypeReference;
import java.time.Duration;
import java.time.OffsetDateTime;
import reactor.core.publisher.Mono;

// DO NOT modify this helper class
/**
 * Implements an operation location polling strategy, from Operation-Location.
 *
 * @param <T> the type of the response type from a polling call, or BinaryData if raw response body should be kept
 * @param <U> the type of the final result object to deserialize into, or BinaryData if raw response body should be
 * kept
 */
public final class OperationLocationPollingStrategy<T, U> extends OperationResourcePollingStrategy<T, U> {

    private static final ClientLogger LOGGER = new ClientLogger(OperationLocationPollingStrategy.class);

    // Serializer used for poll/response body deserialization; defaults to the shared JSON serializer.
    private final ObjectSerializer serializer;

    // Service endpoint used to absolutize a relative Operation-Location path.
    private final String endpoint;

    // Optional property of the final poll body to extract as the LRO result (e.g. "result").
    private final String propertyName;

    /**
     * Creates an instance of the operation resource polling strategy.
     *
     * @param pollingStrategyOptions options to configure this polling strategy.
     * @throws NullPointerException if {@code pollingStrategyOptions} is null.
     */
    public OperationLocationPollingStrategy(PollingStrategyOptions pollingStrategyOptions) {
        this(pollingStrategyOptions, null);
    }

    /**
     * Creates an instance of the operation resource polling strategy.
     *
     * @param pollingStrategyOptions options to configure this polling strategy.
     * @param propertyName the name of the property to extract final result.
     * @throws NullPointerException if {@code pollingStrategyOptions} is null.
     */
    public OperationLocationPollingStrategy(PollingStrategyOptions pollingStrategyOptions, String propertyName) {
        super(PollingUtils.OPERATION_LOCATION_HEADER, pollingStrategyOptions);
        this.propertyName = propertyName;
        this.endpoint = pollingStrategyOptions.getEndpoint();
        this.serializer = pollingStrategyOptions.getSerializer() != null
            ? pollingStrategyOptions.getSerializer()
            : JsonSerializerProviders.createInstance(true);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
        TypeReference<T> pollResponseType) {
        // Response<?> is Response<BinaryData>
        HttpHeader operationLocationHeader = response.getHeaders().get(PollingUtils.OPERATION_LOCATION_HEADER);
        if (operationLocationHeader != null) {
            // Persist an absolute polling URL so subsequent polls work even when the service
            // returned a relative Operation-Location path.
            pollingContext.setData(PollingUtils.OPERATION_LOCATION_HEADER.getCaseSensitiveName(),
                PollingUtils.getAbsolutePath(operationLocationHeader.getValue(), endpoint, LOGGER));
        }
        final String httpMethod = response.getRequest().getHttpMethod().name();
        pollingContext.setData(PollingUtils.HTTP_METHOD, httpMethod);
        pollingContext.setData(PollingUtils.REQUEST_URL, response.getRequest().getUrl().toString());
        if (response.getStatusCode() == 200
            || response.getStatusCode() == 201
            || response.getStatusCode() == 202
            || response.getStatusCode() == 204) {
            final Duration retryAfter
                = PollingUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
            // A malformed initial body is not fatal: fall back to an IN_PROGRESS response with a null value.
            final Mono<PollResponse<T>> pollResponseMono
                = PollingUtils.deserializeResponse((BinaryData) response.getValue(), serializer, pollResponseType)
                    .onErrorResume(exception -> {
                        LOGGER.info("Failed to parse initial response.");
                        return Mono.empty();
                    })
                    .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter));
            return pollResponseMono.switchIfEmpty(
                Mono.fromSupplier(() -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)));
        } else {
            // FIX: removed the duplicated comma after "%d" ("%d,," in the original concatenation),
            // making the message consistent with SyncOperationLocationPollingStrategy.
            return Mono.error(new AzureException(String.format(
                "Operation failed or cancelled with status code %d, '%s' header: %s, and response body: %s",
                response.getStatusCode(), PollingUtils.OPERATION_LOCATION_HEADER, operationLocationHeader,
                response.getValue())));
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }
        if (propertyName != null) {
            // take the last poll response body from PollingContext,
            // and de-serialize the property as final result
            BinaryData latestResponseBody
                = BinaryData.fromString(pollingContext.getData(PollingUtils.POLL_RESPONSE_BODY));
            return PollingUtils
                .deserializeResponse(latestResponseBody, serializer, PollingUtils.POST_POLL_RESULT_TYPE_REFERENCE)
                .flatMap(value -> {
                    if (value.get(propertyName) != null) {
                        return BinaryData.fromObjectAsync(value.get(propertyName))
                            .flatMap(result -> PollingUtils.deserializeResponse(result, serializer, resultType));
                    } else {
                        return Mono.error(new AzureException("Cannot get final result"));
                    }
                })
                .switchIfEmpty(Mono.error(new AzureException("Cannot get final result")));
        } else {
            return super.getResult(pollingContext, resultType);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>After each poll, back-fills the operation id (parsed from the Operation-Location URL) onto
     * {@link ContentAnalyzerAnalyzeOperationStatus} values via its accessor helper.</p>
     */
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        return super.poll(pollingContext, pollResponseType).map(pollResponse -> {
            String operationLocationHeader
                = pollingContext.getData(String.valueOf(PollingUtils.OPERATION_LOCATION_HEADER));
            String operationId = null;
            if (operationLocationHeader != null) {
                operationId = PollingUtils.parseOperationId(operationLocationHeader);
            }
            if (pollResponse.getValue() instanceof ContentAnalyzerAnalyzeOperationStatus) {
                ContentAnalyzerAnalyzeOperationStatus operation
                    = (ContentAnalyzerAnalyzeOperationStatus) pollResponse.getValue();
                ContentAnalyzerAnalyzeOperationStatusHelper.setOperationId(operation, operationId);
            }
            return pollResponse;
        });
    }
}
package com.azure.ai.contentunderstanding.implementation;

import com.azure.core.http.HttpHeaderName;
import com.azure.core.http.HttpHeaders;
import com.azure.core.util.BinaryData;
import com.azure.core.util.CoreUtils;
import com.azure.core.util.DateTimeRfc1123;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.serializer.ObjectSerializer;
import com.azure.core.util.serializer.TypeReference;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.DateTimeException;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.time.temporal.ChronoUnit;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import reactor.core.publisher.Mono;

// DO NOT modify this helper class
/**
 * Shared utilities for the Operation-Location polling strategies: polling-context keys,
 * response (de)serialization helpers, Retry-After parsing, and operation-id extraction.
 */
final class PollingUtils {

    /** Type reference used to read a poll response body as a generic JSON object map. */
    public static final TypeReference<Map<String, Object>> POST_POLL_RESULT_TYPE_REFERENCE
        = new TypeReference<Map<String, Object>>() {
        };

    public static final HttpHeaderName OPERATION_LOCATION_HEADER = HttpHeaderName.fromString("Operation-Location");

    // PollingContext data keys.
    public static final String HTTP_METHOD = "httpMethod";

    public static final String REQUEST_URL = "requestURL";

    public static final String POLL_RESPONSE_BODY = "pollResponseBody";

    private static final String FORWARD_SLASH = "/";

    // Utility class; no instances.
    private PollingUtils() {
    }

    /**
     * Resolves {@code path} against {@code endpoint} when it is relative; returns it unchanged when absolute.
     *
     * @param path the path from the Operation-Location header.
     * @param endpoint the service endpoint used as the base for relative paths.
     * @param logger the logger used to surface validation failures.
     * @return an absolute URL string.
     * @throws IllegalArgumentException if {@code path} is not a valid URI, or is relative while
     * {@code endpoint} is null or empty.
     */
    public static String getAbsolutePath(String path, String endpoint, ClientLogger logger) {
        try {
            URI uri = new URI(path);
            if (!uri.isAbsolute()) {
                if (CoreUtils.isNullOrEmpty(endpoint)) {
                    throw logger.logExceptionAsError(new IllegalArgumentException(
                        "Relative path requires endpoint to be non-null and non-empty to create an absolute path."));
                }
                // Join while producing exactly one slash between endpoint and path.
                if (endpoint.endsWith(FORWARD_SLASH) && path.startsWith(FORWARD_SLASH)) {
                    return endpoint + path.substring(1);
                } else if (!endpoint.endsWith(FORWARD_SLASH) && !path.startsWith(FORWARD_SLASH)) {
                    return endpoint + FORWARD_SLASH + path;
                } else {
                    return endpoint + path;
                }
            }
        } catch (URISyntaxException ex) {
            throw logger.logExceptionAsWarning(new IllegalArgumentException("'path' must be a valid URI.", ex));
        }
        return path;
    }

    /**
     * Synchronously deserializes {@code binaryData} into {@code T}; returns {@code binaryData} itself
     * (made replayable) when {@code T} is {@link BinaryData}.
     *
     * @param <T> the target type.
     * @param binaryData the data to deserialize, may be null.
     * @param serializer the serializer to use.
     * @param typeReference the target type reference.
     * @return the deserialized value, or null when {@code binaryData} is null.
     */
    public static <T> T deserializeResponseSync(BinaryData binaryData, ObjectSerializer serializer,
        TypeReference<T> typeReference) {
        T value;
        if (binaryData == null) {
            value = null;
        } else if (typeReference.getJavaClass().isAssignableFrom(BinaryData.class)) {
            // T is BinaryData
            value = typeReference.getJavaClass().cast(binaryData.toReplayableBinaryData());
        } else {
            value = binaryData.toObject(typeReference, serializer);
        }
        return value;
    }

    /**
     * Asynchronously deserializes {@code binaryData} into {@code T}; emits {@code binaryData} itself
     * (made replayable) when {@code T} is {@link BinaryData}, or completes empty when the input is null.
     *
     * @param <T> the target type.
     * @param binaryData the data to deserialize, may be null.
     * @param serializer the serializer to use.
     * @param typeReference the target type reference.
     * @return a {@link Mono} emitting the deserialized value, or empty when {@code binaryData} is null.
     */
    @SuppressWarnings("unchecked")
    public static <T> Mono<T> deserializeResponse(BinaryData binaryData, ObjectSerializer serializer,
        TypeReference<T> typeReference) {
        Mono<T> value;
        if (binaryData == null) {
            value = Mono.empty();
        } else if (typeReference.getJavaClass().isAssignableFrom(BinaryData.class)) {
            // T is BinaryData
            value = (Mono<T>) binaryData.toReplayableBinaryDataAsync();
        } else {
            value = binaryData.toObjectAsync(typeReference, serializer);
        }
        return value;
    }

    private static final HttpHeaderName RETRY_AFTER_MS_HEADER = HttpHeaderName.fromString("retry-after-ms");

    private static final HttpHeaderName X_MS_RETRY_AFTER_MS_HEADER = HttpHeaderName.fromString("x-ms-retry-after-ms");

    /**
     * Determines the retry delay from response headers, checking in order:
     * {@code x-ms-retry-after-ms}, {@code retry-after-ms} (both milliseconds), then {@code Retry-After}
     * (seconds or an RFC1123 date).
     *
     * @param headers the response headers.
     * @param nowSupplier supplies the current time, used to compute the delay for RFC1123 dates.
     * @return the retry delay, or null when no retry-after header is present or parseable.
     */
    public static Duration getRetryAfterFromHeaders(HttpHeaders headers, Supplier<OffsetDateTime> nowSupplier) {
        // Found 'x-ms-retry-after-ms' header, use a Duration of milliseconds based on the value.
        Duration retryDelay = tryGetRetryDelay(headers, X_MS_RETRY_AFTER_MS_HEADER, PollingUtils::tryGetDelayMillis);
        if (retryDelay != null) {
            return retryDelay;
        }
        // Found 'retry-after-ms' header, use a Duration of milliseconds based on the value.
        retryDelay = tryGetRetryDelay(headers, RETRY_AFTER_MS_HEADER, PollingUtils::tryGetDelayMillis);
        if (retryDelay != null) {
            return retryDelay;
        }
        // Found 'Retry-After' header. First, attempt to resolve it as a Duration of seconds. If that fails, then
        // attempt to resolve it as an HTTP date (RFC1123).
        retryDelay = tryGetRetryDelay(headers, HttpHeaderName.RETRY_AFTER,
            headerValue -> tryParseLongOrDateTime(headerValue, nowSupplier));
        // Either the retry delay will have been found or it'll be null, null indicates no retry after.
        return retryDelay;
    }

    private static Duration tryGetRetryDelay(HttpHeaders headers, HttpHeaderName headerName,
        Function<String, Duration> delayParser) {
        String headerValue = headers.getValue(headerName);
        return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
    }

    private static Duration tryParseLongOrDateTime(String value, Supplier<OffsetDateTime> nowSupplier) {
        long delaySeconds;
        try {
            // RFC1123 date: delay is the span from "now" until that instant.
            OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
            delaySeconds = nowSupplier.get().until(retryAfter, ChronoUnit.SECONDS);
        } catch (DateTimeException ex) {
            delaySeconds = tryParseLong(value);
        }
        return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
    }

    private static long tryParseLong(String value) {
        try {
            return Long.parseLong(value);
        } catch (NumberFormatException ex) {
            return -1;
        }
    }

    private static Duration tryGetDelayMillis(String value) {
        long delayMillis = tryParseLong(value);
        return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
    }

    // Matches ".../contentunderstanding/<...>/<operationId>" and captures the last path segment
    // before any query string.
    private static final Pattern OPERATION_ID_PATTERN
        = Pattern.compile("[^:]+://[^/]+/contentunderstanding/.+/([^?/]+)");

    /**
     * Parses the operationId from the Operation-Location header.
     *
     * @param operationLocationHeader the Operation-Location header value.
     * @return the operationId, or null if not found.
     */
    static String parseOperationId(String operationLocationHeader) {
        if (CoreUtils.isNullOrEmpty(operationLocationHeader)) {
            return null;
        }
        Matcher matcher = OPERATION_ID_PATTERN.matcher(operationLocationHeader);
        if (matcher.find() && matcher.group(1) != null) {
            return matcher.group(1);
        }
        return null;
    }
}
package com.azure.ai.contentunderstanding.implementation;

import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.core.exception.AzureException;
import com.azure.core.http.HttpHeader;
import com.azure.core.http.rest.Response;
import com.azure.core.util.BinaryData;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.PollResponse;
import com.azure.core.util.polling.PollingContext;
import com.azure.core.util.polling.PollingStrategyOptions;
import com.azure.core.util.polling.SyncOperationResourcePollingStrategy;
import com.azure.core.util.serializer.JsonSerializerProviders;
import com.azure.core.util.serializer.ObjectSerializer;
import com.azure.core.util.serializer.TypeReference;
import java.io.UncheckedIOException;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.util.Map;

// DO NOT modify this helper class
/**
 * Implements a synchronous operation location polling strategy, from Operation-Location.
 *
 * @param <T> the type of the response type from a polling call, or BinaryData if raw response body should be kept
 * @param <U> the type of the final result object to deserialize into, or BinaryData if raw response body should be
 * kept
 */
public final class SyncOperationLocationPollingStrategy<T, U> extends SyncOperationResourcePollingStrategy<T, U> {

    private static final ClientLogger LOGGER = new ClientLogger(SyncOperationLocationPollingStrategy.class);

    // Serializer used for poll/response body deserialization; defaults to the shared JSON serializer.
    private final ObjectSerializer serializer;

    // Service endpoint used to absolutize a relative Operation-Location path.
    private final String endpoint;

    // Optional property of the final poll body to extract as the LRO result (e.g. "result").
    private final String propertyName;

    /**
     * Creates an instance of the operation resource polling strategy.
     *
     * @param pollingStrategyOptions options to configure this polling strategy.
     * @throws NullPointerException if {@code pollingStrategyOptions} is null.
     */
    public SyncOperationLocationPollingStrategy(PollingStrategyOptions pollingStrategyOptions) {
        this(pollingStrategyOptions, null);
    }

    /**
     * Creates an instance of the operation resource polling strategy.
     *
     * @param pollingStrategyOptions options to configure this polling strategy.
     * @param propertyName the name of the property to extract final result.
     * @throws NullPointerException if {@code pollingStrategyOptions} is null.
     */
    public SyncOperationLocationPollingStrategy(PollingStrategyOptions pollingStrategyOptions, String propertyName) {
        super(PollingUtils.OPERATION_LOCATION_HEADER, pollingStrategyOptions);
        this.propertyName = propertyName;
        this.endpoint = pollingStrategyOptions.getEndpoint();
        this.serializer = pollingStrategyOptions.getSerializer() != null
            ? pollingStrategyOptions.getSerializer()
            : JsonSerializerProviders.createInstance(true);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public PollResponse<T> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
        TypeReference<T> pollResponseType) {
        // Response<?> is Response<BinaryData>
        HttpHeader operationLocationHeader = response.getHeaders().get(PollingUtils.OPERATION_LOCATION_HEADER);
        if (operationLocationHeader != null) {
            // Persist an absolute polling URL so subsequent polls work even when the service
            // returned a relative Operation-Location path.
            pollingContext.setData(PollingUtils.OPERATION_LOCATION_HEADER.getCaseSensitiveName(),
                PollingUtils.getAbsolutePath(operationLocationHeader.getValue(), endpoint, LOGGER));
        }
        final String httpMethod = response.getRequest().getHttpMethod().name();
        pollingContext.setData(PollingUtils.HTTP_METHOD, httpMethod);
        pollingContext.setData(PollingUtils.REQUEST_URL, response.getRequest().getUrl().toString());
        if (response.getStatusCode() == 200
            || response.getStatusCode() == 201
            || response.getStatusCode() == 202
            || response.getStatusCode() == 204) {
            final Duration retryAfter
                = PollingUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
            // A malformed initial body is not fatal: fall back to an IN_PROGRESS response with a null value.
            T initialResponseType = null;
            try {
                initialResponseType = PollingUtils.deserializeResponseSync((BinaryData) response.getValue(),
                    serializer, pollResponseType);
            } catch (UncheckedIOException e) {
                LOGGER.info("Failed to parse initial response.");
            }
            return new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, initialResponseType, retryAfter);
        }
        throw LOGGER.logExceptionAsError(new AzureException(
            String.format("Operation failed or cancelled with status code %d, '%s' header: %s, and response body: %s",
                response.getStatusCode(), PollingUtils.OPERATION_LOCATION_HEADER, operationLocationHeader,
                response.getValue())));
    }

    /**
     * {@inheritDoc}
     */
    @Override // FIX: added missing @Override (present on the async counterpart; this overrides the base strategy).
    public U getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            throw LOGGER.logExceptionAsError(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            throw LOGGER.logExceptionAsError(new AzureException("Long running operation cancelled."));
        }
        if (propertyName != null) {
            // take the last poll response body from PollingContext,
            // and de-serialize the property as final result
            BinaryData latestResponseBody
                = BinaryData.fromString(pollingContext.getData(PollingUtils.POLL_RESPONSE_BODY));
            Map<String, Object> pollResult = PollingUtils.deserializeResponseSync(latestResponseBody, serializer,
                PollingUtils.POST_POLL_RESULT_TYPE_REFERENCE);
            if (pollResult != null && pollResult.get(propertyName) != null) {
                return PollingUtils.deserializeResponseSync(BinaryData.fromObject(pollResult.get(propertyName)),
                    serializer, resultType);
            } else {
                throw LOGGER.logExceptionAsError(new AzureException("Cannot get final result"));
            }
        } else {
            return super.getResult(pollingContext, resultType);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>After each poll, back-fills the operation id (parsed from the Operation-Location URL) onto
     * {@link ContentAnalyzerAnalyzeOperationStatus} values via its accessor helper.</p>
     */
    @Override
    public PollResponse<T> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        PollResponse<T> pollResponse = super.poll(pollingContext, pollResponseType);
        String operationLocationHeader
            = pollingContext.getData(String.valueOf(PollingUtils.OPERATION_LOCATION_HEADER));
        String operationId = null;
        if (operationLocationHeader != null) {
            operationId = PollingUtils.parseOperationId(operationLocationHeader);
        }
        if (pollResponse.getValue() instanceof ContentAnalyzerAnalyzeOperationStatus) {
            ContentAnalyzerAnalyzeOperationStatus operation
                = (ContentAnalyzerAnalyzeOperationStatus) pollResponse.getValue();
            ContentAnalyzerAnalyzeOperationStatusHelper.setOperationId(operation, operationId);
        }
        return pollResponse;
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.implementation.models;

import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.core.annotation.Fluent;
import com.azure.core.annotation.Generated;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * The AnalyzeRequest1 model.
 */
@Fluent
public final class AnalyzeRequest1 implements JsonSerializable<AnalyzeRequest1> {
    /*
     * Inputs to analyze. Currently, only pro mode supports multiple inputs.
     */
    @Generated
    private List<AnalyzeInput> inputs;

    /*
     * Override default mapping of model names to deployments.
     * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }.
     */
    @Generated
    private Map<String, String> modelDeployments;

    /**
     * Creates an instance of AnalyzeRequest1 class.
     */
    @Generated
    public AnalyzeRequest1() {
    }

    /**
     * Get the inputs property: Inputs to analyze. Currently, only pro mode supports multiple inputs.
     *
     * @return the inputs value.
     */
    @Generated
    public List<AnalyzeInput> getInputs() {
        return this.inputs;
    }

    /**
     * Set the inputs property: Inputs to analyze. Currently, only pro mode supports multiple inputs.
     *
     * @param inputs the inputs value to set.
     * @return the AnalyzeRequest1 object itself.
     */
    @Generated
    public AnalyzeRequest1 setInputs(List<AnalyzeInput> inputs) {
        this.inputs = inputs;
        return this;
    }

    /**
     * Get the modelDeployments property: Override default mapping of model names to deployments.
     * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }.
     *
     * @return the modelDeployments value.
     */
    @Generated
    public Map<String, String> getModelDeployments() {
        return this.modelDeployments;
    }

    /**
     * Set the modelDeployments property: Override default mapping of model names to deployments.
     * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }.
     *
     * @param modelDeployments the modelDeployments value to set.
     * @return the AnalyzeRequest1 object itself.
     */
    @Generated
    public AnalyzeRequest1 setModelDeployments(Map<String, String> modelDeployments) {
        this.modelDeployments = modelDeployments;
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeArrayField("inputs", this.inputs, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeMapField("modelDeployments", this.modelDeployments,
            (writer, element) -> writer.writeString(element));
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of AnalyzeRequest1 from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of AnalyzeRequest1 if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IOException If an error occurs while reading the AnalyzeRequest1.
     */
    @Generated
    public static AnalyzeRequest1 fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            AnalyzeRequest1 deserializedAnalyzeRequest1 = new AnalyzeRequest1();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("inputs".equals(fieldName)) {
                    List<AnalyzeInput> inputs = reader.readArray(reader1 -> AnalyzeInput.fromJson(reader1));
                    deserializedAnalyzeRequest1.inputs = inputs;
                } else if ("modelDeployments".equals(fieldName)) {
                    Map<String, String> modelDeployments = reader.readMap(reader1 -> reader1.getString());
                    deserializedAnalyzeRequest1.modelDeployments = modelDeployments;
                } else {
                    // Tolerate unknown properties for forward compatibility.
                    reader.skipChildren();
                }
            }

            return deserializedAnalyzeRequest1;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.implementation.models;

import com.azure.core.annotation.Fluent;
import com.azure.core.annotation.Generated;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * The CopyAnalyzerRequest model.
 */
@Fluent
public final class CopyAnalyzerRequest implements JsonSerializable<CopyAnalyzerRequest> {
    /*
     * Azure resource ID of the source analyzer location. Defaults to the current resource.
     */
    @Generated
    private String sourceAzureResourceId;

    /*
     * Azure region of the source analyzer location. Defaults to current region.
     */
    @Generated
    private String sourceRegion;

    /*
     * Source analyzer ID.
     */
    @Generated
    private final String sourceAnalyzerId;

    /**
     * Creates an instance of CopyAnalyzerRequest class.
     *
     * @param sourceAnalyzerId the sourceAnalyzerId value to set.
     */
    @Generated
    public CopyAnalyzerRequest(String sourceAnalyzerId) {
        this.sourceAnalyzerId = sourceAnalyzerId;
    }

    /**
     * Get the sourceAzureResourceId property: Azure resource ID of the source analyzer location. Defaults to the
     * current resource.
     *
     * @return the sourceAzureResourceId value.
     */
    @Generated
    public String getSourceAzureResourceId() {
        return this.sourceAzureResourceId;
    }

    /**
     * Set the sourceAzureResourceId property: Azure resource ID of the source analyzer location. Defaults to the
     * current resource.
     *
     * @param sourceAzureResourceId the sourceAzureResourceId value to set.
     * @return the CopyAnalyzerRequest object itself.
     */
    @Generated
    public CopyAnalyzerRequest setSourceAzureResourceId(String sourceAzureResourceId) {
        this.sourceAzureResourceId = sourceAzureResourceId;
        return this;
    }

    /**
     * Get the sourceRegion property: Azure region of the source analyzer location. Defaults to current region.
     *
     * @return the sourceRegion value.
     */
    @Generated
    public String getSourceRegion() {
        return this.sourceRegion;
    }

    /**
     * Set the sourceRegion property: Azure region of the source analyzer location. Defaults to current region.
     *
     * @param sourceRegion the sourceRegion value to set.
     * @return the CopyAnalyzerRequest object itself.
     */
    @Generated
    public CopyAnalyzerRequest setSourceRegion(String sourceRegion) {
        this.sourceRegion = sourceRegion;
        return this;
    }

    /**
     * Get the sourceAnalyzerId property: Source analyzer ID.
     *
     * @return the sourceAnalyzerId value.
     */
    @Generated
    public String getSourceAnalyzerId() {
        return this.sourceAnalyzerId;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("sourceAnalyzerId", this.sourceAnalyzerId);
        jsonWriter.writeStringField("sourceAzureResourceId", this.sourceAzureResourceId);
        jsonWriter.writeStringField("sourceRegion", this.sourceRegion);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of CopyAnalyzerRequest from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of CopyAnalyzerRequest if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the CopyAnalyzerRequest.
     */
    @Generated
    public static CopyAnalyzerRequest fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String sourceAnalyzerId = null;
            String sourceAzureResourceId = null;
            String sourceRegion = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("sourceAnalyzerId".equals(fieldName)) {
                    sourceAnalyzerId = reader.getString();
                } else if ("sourceAzureResourceId".equals(fieldName)) {
                    sourceAzureResourceId = reader.getString();
                } else if ("sourceRegion".equals(fieldName)) {
                    sourceRegion = reader.getString();
                } else {
                    // Tolerate unknown properties for forward compatibility.
                    reader.skipChildren();
                }
            }
            CopyAnalyzerRequest deserializedCopyAnalyzerRequest = new CopyAnalyzerRequest(sourceAnalyzerId);
            deserializedCopyAnalyzerRequest.sourceAzureResourceId = sourceAzureResourceId;
            deserializedCopyAnalyzerRequest.sourceRegion = sourceRegion;

            return deserializedCopyAnalyzerRequest;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.implementation.models;

import com.azure.core.annotation.Fluent;
import com.azure.core.annotation.Generated;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * The GrantCopyAuthorizationRequest1 model.
 */
@Fluent
public final class GrantCopyAuthorizationRequest1 implements JsonSerializable<GrantCopyAuthorizationRequest1> {
    /*
     * Azure resource ID of the target analyzer location.
     */
    @Generated
    private final String targetAzureResourceId;

    /*
     * Azure region of the target analyzer location. Defaults to current region.
     */
    @Generated
    private String targetRegion;

    /**
     * Creates an instance of GrantCopyAuthorizationRequest1 class.
     *
     * @param targetAzureResourceId the targetAzureResourceId value to set.
     */
    @Generated
    public GrantCopyAuthorizationRequest1(String targetAzureResourceId) {
        this.targetAzureResourceId = targetAzureResourceId;
    }

    /**
     * Get the targetAzureResourceId property: Azure resource ID of the target analyzer location.
     *
     * @return the targetAzureResourceId value.
     */
    @Generated
    public String getTargetAzureResourceId() {
        return this.targetAzureResourceId;
    }

    /**
     * Get the targetRegion property: Azure region of the target analyzer location. Defaults to current region.
     *
     * @return the targetRegion value.
     */
    @Generated
    public String getTargetRegion() {
        return this.targetRegion;
    }

    /**
     * Set the targetRegion property: Azure region of the target analyzer location. Defaults to current region.
     *
     * @param targetRegion the targetRegion value to set.
     * @return the GrantCopyAuthorizationRequest1 object itself.
     */
    @Generated
    public GrantCopyAuthorizationRequest1 setTargetRegion(String targetRegion) {
        this.targetRegion = targetRegion;
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("targetAzureResourceId", this.targetAzureResourceId);
        jsonWriter.writeStringField("targetRegion", this.targetRegion);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of GrantCopyAuthorizationRequest1 from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of GrantCopyAuthorizationRequest1 if the JsonReader was pointing to an instance of it, or
     * null if it was pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the GrantCopyAuthorizationRequest1.
     */
    @Generated
    public static GrantCopyAuthorizationRequest1 fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String targetAzureResourceId = null;
            String targetRegion = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("targetAzureResourceId".equals(fieldName)) {
                    targetAzureResourceId = reader.getString();
                } else if ("targetRegion".equals(fieldName)) {
                    targetRegion = reader.getString();
                } else {
                    // Tolerate unknown properties for forward compatibility.
                    reader.skipChildren();
                }
            }
            GrantCopyAuthorizationRequest1 deserializedGrantCopyAuthorizationRequest1
                = new GrantCopyAuthorizationRequest1(targetAzureResourceId);
            deserializedGrantCopyAuthorizationRequest1.targetRegion = targetRegion;

            return deserializedGrantCopyAuthorizationRequest1;
        });
    }
}
a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/models/package-info.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/models/package-info.java new file mode 100644 index 000000000000..92b4d68ff8f3 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/models/package-info.java @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +/** + * Package containing the data models for ContentUnderstanding. + * The Content Understanding service extracts content and fields from multimodal input. + */ +package com.azure.ai.contentunderstanding.implementation.models; diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/package-info.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/package-info.java new file mode 100644 index 000000000000..27a6c5afbbed --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/implementation/package-info.java @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +/** + * Package containing the implementations for ContentUnderstanding. + * The Content Understanding service extracts content and fields from multimodal input. 
+ */ +package com.azure.ai.contentunderstanding.implementation; diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeInput.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeInput.java new file mode 100644 index 000000000000..651ca4b6c765 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeInput.java @@ -0,0 +1,222 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Fluent; +import com.azure.core.annotation.Generated; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Additional input to analyze. + */ +@Fluent +public final class AnalyzeInput implements JsonSerializable { + /* + * The URL of the input to analyze. Only one of url or data should be specified. + */ + @Generated + private String url; + + /* + * Raw image bytes. Provide bytes-like object; do not base64-encode. Only one of url or data should be specified. + */ + @Generated + private byte[] data; + + /* + * Name of the input. + */ + @Generated + private String name; + + /* + * The MIME type of the input content. Ex. application/pdf, image/jpeg, etc. + */ + @Generated + private String mimeType; + + /* + * Range of the input to analyze (ex. `1-3,5,9-`). Document content uses 1-based page numbers, while audio visual + * content uses integer milliseconds. + */ + @Generated + private String inputRange; + + /** + * Creates an instance of AnalyzeInput class. 
+ */ + @Generated + public AnalyzeInput() { + } + + /** + * Get the url property: The URL of the input to analyze. Only one of url or data should be specified. + * + * @return the url value. + */ + @Generated + public String getUrl() { + return this.url; + } + + /** + * Set the url property: The URL of the input to analyze. Only one of url or data should be specified. + * + * @param url the url value to set. + * @return the AnalyzeInput object itself. + */ + @Generated + public AnalyzeInput setUrl(String url) { + this.url = url; + return this; + } + + /** + * Get the data property: Raw image bytes. Provide bytes-like object; do not base64-encode. Only one of url or data + * should be specified. + * + * @return the data value. + */ + @Generated + public byte[] getData() { + return CoreUtils.clone(this.data); + } + + /** + * Set the data property: Raw image bytes. Provide bytes-like object; do not base64-encode. Only one of url or data + * should be specified. + * + * @param data the data value to set. + * @return the AnalyzeInput object itself. + */ + @Generated + public AnalyzeInput setData(byte[] data) { + this.data = CoreUtils.clone(data); + return this; + } + + /** + * Get the name property: Name of the input. + * + * @return the name value. + */ + @Generated + public String getName() { + return this.name; + } + + /** + * Set the name property: Name of the input. + * + * @param name the name value to set. + * @return the AnalyzeInput object itself. + */ + @Generated + public AnalyzeInput setName(String name) { + this.name = name; + return this; + } + + /** + * Get the mimeType property: The MIME type of the input content. Ex. application/pdf, image/jpeg, etc. + * + * @return the mimeType value. + */ + @Generated + public String getMimeType() { + return this.mimeType; + } + + /** + * Set the mimeType property: The MIME type of the input content. Ex. application/pdf, image/jpeg, etc. + * + * @param mimeType the mimeType value to set. 
+ * @return the AnalyzeInput object itself. + */ + @Generated + public AnalyzeInput setMimeType(String mimeType) { + this.mimeType = mimeType; + return this; + } + + /** + * Get the inputRange property: Range of the input to analyze (ex. `1-3,5,9-`). Document content uses 1-based page + * numbers, while audio visual content uses integer milliseconds. + * + * @return the inputRange value. + */ + @Generated + public String getInputRange() { + return this.inputRange; + } + + /** + * Set the inputRange property: Range of the input to analyze (ex. `1-3,5,9-`). Document content uses 1-based page + * numbers, while audio visual content uses integer milliseconds. + * + * @param inputRange the inputRange value to set. + * @return the AnalyzeInput object itself. + */ + @Generated + public AnalyzeInput setInputRange(String inputRange) { + this.inputRange = inputRange; + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("url", this.url); + jsonWriter.writeBinaryField("data", this.data); + jsonWriter.writeStringField("name", this.name); + jsonWriter.writeStringField("mimeType", this.mimeType); + jsonWriter.writeStringField("range", this.inputRange); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AnalyzeInput from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AnalyzeInput if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the AnalyzeInput. 
+ */ + @Generated + public static AnalyzeInput fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AnalyzeInput deserializedAnalyzeInput = new AnalyzeInput(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("url".equals(fieldName)) { + deserializedAnalyzeInput.url = reader.getString(); + } else if ("data".equals(fieldName)) { + deserializedAnalyzeInput.data = reader.getBinary(); + } else if ("name".equals(fieldName)) { + deserializedAnalyzeInput.name = reader.getString(); + } else if ("mimeType".equals(fieldName)) { + deserializedAnalyzeInput.mimeType = reader.getString(); + } else if ("range".equals(fieldName)) { + deserializedAnalyzeInput.inputRange = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedAnalyzeInput; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeResult.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeResult.java new file mode 100644 index 000000000000..999c4df56eeb --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeResult.java @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.core.models.ResponseError;
import com.azure.core.util.CoreUtils;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;

/**
 * Analyze operation result.
 */
@Immutable
public final class AnalyzeResult implements JsonSerializable<AnalyzeResult> {
    /*
     * The unique identifier of the analyzer.
     */
    @Generated
    private String analyzerId;

    /*
     * The version of the API used to analyze the document.
     */
    @Generated
    private String apiVersion;

    /*
     * The date and time when the result was created.
     */
    @Generated
    private OffsetDateTime createdAt;

    /*
     * Warnings encountered while analyzing the document.
     */
    @Generated
    private List<ResponseError> warnings;

    /*
     * The string encoding format for content spans in the response.
     * Possible values are `codePoint`, `utf16`, and `utf8`. Default is `codePoint`.
     */
    @Generated
    private String stringEncoding;

    /*
     * The extracted content.
     */
    @Generated
    private final List<MediaContent> contents;

    /**
     * Creates an instance of AnalyzeResult class.
     *
     * @param contents the contents value to set.
     */
    @Generated
    private AnalyzeResult(List<MediaContent> contents) {
        this.contents = contents;
    }

    /**
     * Get the analyzerId property: The unique identifier of the analyzer.
     *
     * @return the analyzerId value.
     */
    @Generated
    public String getAnalyzerId() {
        return this.analyzerId;
    }

    /**
     * Get the apiVersion property: The version of the API used to analyze the document.
     *
     * @return the apiVersion value.
     */
    @Generated
    public String getApiVersion() {
        return this.apiVersion;
    }

    /**
     * Get the createdAt property: The date and time when the result was created.
     *
     * @return the createdAt value.
     */
    @Generated
    public OffsetDateTime getCreatedAt() {
        return this.createdAt;
    }

    /**
     * Get the warnings property: Warnings encountered while analyzing the document.
     *
     * @return the warnings value.
     */
    @Generated
    public List<ResponseError> getWarnings() {
        return this.warnings;
    }

    /**
     * Get the stringEncoding property: The string encoding format for content spans in the response.
     * Possible values are {@code codePoint}, {@code utf16}, and {@code utf8}. Default is {@code codePoint}.
     *
     * @return the stringEncoding value.
     */
    @Generated
    public String getStringEncoding() {
        return this.stringEncoding;
    }

    /**
     * Get the contents property: The extracted content.
     *
     * @return the contents value.
     */
    @Generated
    public List<MediaContent> getContents() {
        return this.contents;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeArrayField("contents", this.contents, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("analyzerId", this.analyzerId);
        jsonWriter.writeStringField("apiVersion", this.apiVersion);
        jsonWriter.writeStringField("createdAt",
            this.createdAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.createdAt));
        jsonWriter.writeArrayField("warnings", this.warnings, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("stringEncoding", this.stringEncoding);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of AnalyzeResult from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of AnalyzeResult if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the AnalyzeResult.
     */
    @Generated
    public static AnalyzeResult fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            List<MediaContent> contents = null;
            String analyzerId = null;
            String apiVersion = null;
            OffsetDateTime createdAt = null;
            List<ResponseError> warnings = null;
            String stringEncoding = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("contents".equals(fieldName)) {
                    contents = reader.readArray(reader1 -> MediaContent.fromJson(reader1));
                } else if ("analyzerId".equals(fieldName)) {
                    analyzerId = reader.getString();
                } else if ("apiVersion".equals(fieldName)) {
                    apiVersion = reader.getString();
                } else if ("createdAt".equals(fieldName)) {
                    // Tolerant ISO-8601 parsing (handles offsets and 'Z').
                    createdAt = reader
                        .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString()));
                } else if ("warnings".equals(fieldName)) {
                    warnings = reader.readArray(reader1 -> ResponseError.fromJson(reader1));
                } else if ("stringEncoding".equals(fieldName)) {
                    stringEncoding = reader.getString();
                } else {
                    reader.skipChildren();
                }
            }
            AnalyzeResult deserializedAnalyzeResult = new AnalyzeResult(contents);
            deserializedAnalyzeResult.analyzerId = analyzerId;
            deserializedAnalyzeResult.apiVersion = apiVersion;
            deserializedAnalyzeResult.createdAt = createdAt;
            deserializedAnalyzeResult.warnings = warnings;
            deserializedAnalyzeResult.stringEncoding = stringEncoding;

            return deserializedAnalyzeResult;
        });
    }
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnnotationFormat.java
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnnotationFormat.java new file mode 100644 index 000000000000..163e45658a21 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AnnotationFormat.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Representation format of annotations in analyze result markdown. + */ +public final class AnnotationFormat extends ExpandableStringEnum { + /** + * Do not represent annotations. + */ + @Generated + public static final AnnotationFormat NONE = fromString("none"); + + /** + * Represent basic annotation information using markdown formatting. + */ + @Generated + public static final AnnotationFormat MARKDOWN = fromString("markdown"); + + /** + * Creates a new instance of AnnotationFormat value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public AnnotationFormat() { + } + + /** + * Creates or finds a AnnotationFormat from its string representation. + * + * @param name a name to look for. + * @return the corresponding AnnotationFormat. + */ + @Generated + public static AnnotationFormat fromString(String name) { + return fromString(name, AnnotationFormat.class); + } + + /** + * Gets known AnnotationFormat values. + * + * @return known AnnotationFormat values. 
+ */ + @Generated + public static Collection values() { + return values(AnnotationFormat.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ArrayField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ArrayField.java new file mode 100644 index 000000000000..8ffa5e49f3e7 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ArrayField.java @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.logging.ClientLogger; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Array field extracted from the content. + */ +@Immutable +public final class ArrayField extends ContentField { + + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.ARRAY; + + /* + * Array field value. + */ + @Generated + private List valueArray; + + /** + * Creates an instance of ArrayField class. + */ + @Generated + private ArrayField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueArray property: Array field value. + * + * @return the valueArray value. 
+ */ + @Generated + public List getValueArray() { + return this.valueArray; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeArrayField("valueArray", this.valueArray, (writer, element) -> writer.writeJson(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ArrayField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ArrayField if the JsonReader was pointing to an instance of it, or null if it was pointing + * to JSON null. + * @throws IOException If an error occurs while reading the ArrayField. 
+ */ + @Generated + public static ArrayField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ArrayField deserializedArrayField = new ArrayField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedArrayField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedArrayField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedArrayField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedArrayField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueArray".equals(fieldName)) { + List valueArray = reader.readArray(reader1 -> ContentField.fromJson(reader1)); + deserializedArrayField.valueArray = valueArray; + } else { + reader.skipChildren(); + } + } + return deserializedArrayField; + }); + } + + private static final ClientLogger LOGGER = new ClientLogger(ArrayField.class); + + /** + * Gets the number of items in the array. + * + * @return the number of items in the array, or 0 if the array is null. + */ + public int size() { + return getValueArray() != null ? getValueArray().size() : 0; + } + + /** + * Gets a field from the array by index. + * + * @param index The zero-based index of the field to retrieve. + * @return The field at the specified index. + * @throws IndexOutOfBoundsException if the index is out of range. + */ + public ContentField get(int index) { + if (getValueArray() == null || index < 0 || index >= getValueArray().size()) { + throw LOGGER.logThrowableAsError(new IndexOutOfBoundsException( + "Index " + index + " is out of range. 
Array has " + size() + " elements.")); + } + return getValueArray().get(index); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContent.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContent.java new file mode 100644 index 000000000000..1fe56f5ea66b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContent.java @@ -0,0 +1,294 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Audio visual content. Ex. audio/wav, video/mp4. + */ +@Immutable +public final class AudioVisualContent extends MediaContent { + + /* + * Content kind. + */ + @Generated + private MediaContentKind kind = MediaContentKind.AUDIO_VISUAL; + + /* + * Start time of the content in milliseconds. + */ + @Generated + private final long startTimeMs; + + /* + * End time of the content in milliseconds. + */ + @Generated + private final long endTimeMs; + + /* + * Width of each video frame in pixels, if applicable. + */ + @Generated + private Integer width; + + /* + * Height of each video frame in pixels, if applicable. + */ + @Generated + private Integer height; + + /* + * List of camera shot changes in the video, represented by its timestamp in milliseconds. Only if returnDetails is + * true. 
+ */ + @Generated + private List cameraShotTimesMs; + + /* + * List of key frames in the video, represented by its timestamp in milliseconds. Only if returnDetails is true. + */ + @Generated + private List keyFrameTimesMs; + + /* + * List of transcript phrases. Only if returnDetails is true. + */ + @Generated + private List transcriptPhrases; + + /* + * List of detected content segments. Only if enableSegment is true. + */ + @Generated + private List segments; + + /** + * Creates an instance of AudioVisualContent class. + * + * @param mimeType the mimeType value to set. + * @param startTimeMs the startTimeMs value to set. + * @param endTimeMs the endTimeMs value to set. + */ + @Generated + private AudioVisualContent(String mimeType, long startTimeMs, long endTimeMs) { + super(mimeType); + this.startTimeMs = startTimeMs; + this.endTimeMs = endTimeMs; + } + + /** + * Get the kind property: Content kind. + * + * @return the kind value. + */ + @Generated + @Override + public MediaContentKind getKind() { + return this.kind; + } + + /** + * Get the startTimeMs property: Start time of the content in milliseconds. + * + * @return the startTimeMs value. + */ + @Generated + public long getStartTimeMs() { + return this.startTimeMs; + } + + /** + * Get the endTimeMs property: End time of the content in milliseconds. + * + * @return the endTimeMs value. + */ + @Generated + public long getEndTimeMs() { + return this.endTimeMs; + } + + /** + * Get the width property: Width of each video frame in pixels, if applicable. + * + * @return the width value. + */ + @Generated + public Integer getWidth() { + return this.width; + } + + /** + * Get the height property: Height of each video frame in pixels, if applicable. + * + * @return the height value. + */ + @Generated + public Integer getHeight() { + return this.height; + } + + /** + * Get the cameraShotTimesMs property: List of camera shot changes in the video, represented by its timestamp in + * milliseconds. 
Only if returnDetails is true. + * + * @return the cameraShotTimesMs value. + */ + @Generated + public List getCameraShotTimesMs() { + return this.cameraShotTimesMs; + } + + /** + * Get the keyFrameTimesMs property: List of key frames in the video, represented by its timestamp in milliseconds. + * Only if returnDetails is true. + * + * @return the keyFrameTimesMs value. + */ + @Generated + public List getKeyFrameTimesMs() { + return this.keyFrameTimesMs; + } + + /** + * Get the transcriptPhrases property: List of transcript phrases. Only if returnDetails is true. + * + * @return the transcriptPhrases value. + */ + @Generated + public List getTranscriptPhrases() { + return this.transcriptPhrases; + } + + /** + * Get the segments property: List of detected content segments. Only if enableSegment is true. + * + * @return the segments value. + */ + @Generated + public List getSegments() { + return this.segments; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("mimeType", getMimeType()); + jsonWriter.writeStringField("analyzerId", getAnalyzerId()); + jsonWriter.writeStringField("category", getCategory()); + jsonWriter.writeStringField("path", getPath()); + jsonWriter.writeStringField("markdown", getMarkdown()); + jsonWriter.writeMapField("fields", getFields(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeLongField("startTimeMs", this.startTimeMs); + jsonWriter.writeLongField("endTimeMs", this.endTimeMs); + jsonWriter.writeStringField("kind", this.kind == null ? 
null : this.kind.toString()); + jsonWriter.writeNumberField("width", this.width); + jsonWriter.writeNumberField("height", this.height); + jsonWriter.writeArrayField("cameraShotTimesMs", this.cameraShotTimesMs, + (writer, element) -> writer.writeLong(element)); + jsonWriter.writeArrayField("keyFrameTimesMs", this.keyFrameTimesMs, + (writer, element) -> writer.writeLong(element)); + jsonWriter.writeArrayField("transcriptPhrases", this.transcriptPhrases, + (writer, element) -> writer.writeJson(element)); + jsonWriter.writeArrayField("segments", this.segments, (writer, element) -> writer.writeJson(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AudioVisualContent from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AudioVisualContent if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the AudioVisualContent. 
+ */ + @Generated + public static AudioVisualContent fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String mimeType = null; + String analyzerId = null; + String category = null; + String path = null; + String markdown = null; + Map fields = null; + long startTimeMs = 0L; + long endTimeMs = 0L; + MediaContentKind kind = MediaContentKind.AUDIO_VISUAL; + Integer width = null; + Integer height = null; + List cameraShotTimesMs = null; + List keyFrameTimesMs = null; + List transcriptPhrases = null; + List segments = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + if ("mimeType".equals(fieldName)) { + mimeType = reader.getString(); + } else if ("analyzerId".equals(fieldName)) { + analyzerId = reader.getString(); + } else if ("category".equals(fieldName)) { + category = reader.getString(); + } else if ("path".equals(fieldName)) { + path = reader.getString(); + } else if ("markdown".equals(fieldName)) { + markdown = reader.getString(); + } else if ("fields".equals(fieldName)) { + fields = reader.readMap(reader1 -> ContentField.fromJson(reader1)); + } else if ("startTimeMs".equals(fieldName)) { + startTimeMs = reader.getLong(); + } else if ("endTimeMs".equals(fieldName)) { + endTimeMs = reader.getLong(); + } else if ("kind".equals(fieldName)) { + kind = MediaContentKind.fromString(reader.getString()); + } else if ("width".equals(fieldName)) { + width = reader.getNullable(JsonReader::getInt); + } else if ("height".equals(fieldName)) { + height = reader.getNullable(JsonReader::getInt); + } else if ("cameraShotTimesMs".equals(fieldName)) { + cameraShotTimesMs = reader.readArray(reader1 -> reader1.getLong()); + } else if ("keyFrameTimesMs".equals(fieldName) || "KeyFrameTimesMs".equals(fieldName)) { + if (keyFrameTimesMs == null) { + keyFrameTimesMs = reader.readArray(reader1 -> reader1.getLong()); + } + } else if 
("transcriptPhrases".equals(fieldName)) { + transcriptPhrases = reader.readArray(reader1 -> TranscriptPhrase.fromJson(reader1)); + } else if ("segments".equals(fieldName)) { + segments = reader.readArray(reader1 -> AudioVisualContentSegment.fromJson(reader1)); + } else { + reader.skipChildren(); + } + } + AudioVisualContent deserializedAudioVisualContent + = new AudioVisualContent(mimeType, startTimeMs, endTimeMs); + deserializedAudioVisualContent.setAnalyzerId(analyzerId); + deserializedAudioVisualContent.setCategory(category); + deserializedAudioVisualContent.setPath(path); + deserializedAudioVisualContent.setMarkdown(markdown); + deserializedAudioVisualContent.setFields(fields); + deserializedAudioVisualContent.kind = kind; + deserializedAudioVisualContent.width = width; + deserializedAudioVisualContent.height = height; + deserializedAudioVisualContent.cameraShotTimesMs = cameraShotTimesMs; + deserializedAudioVisualContent.keyFrameTimesMs = keyFrameTimesMs; + deserializedAudioVisualContent.transcriptPhrases = transcriptPhrases; + deserializedAudioVisualContent.segments = segments; + return deserializedAudioVisualContent; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContentSegment.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContentSegment.java new file mode 100644 index 000000000000..cbc6f651f849 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContentSegment.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Detected audio/visual content segment. + */ +@Immutable +public final class AudioVisualContentSegment implements JsonSerializable { + /* + * Segment identifier. + */ + @Generated + private final String segmentId; + + /* + * Classified content category. + */ + @Generated + private final String category; + + /* + * Span of the segment in the markdown content. + */ + @Generated + private final ContentSpan span; + + /* + * Start time of the segment in milliseconds. + */ + @Generated + private final long startTimeMs; + + /* + * End time of the segment in milliseconds. + */ + @Generated + private final long endTimeMs; + + /** + * Creates an instance of AudioVisualContentSegment class. + * + * @param segmentId the segmentId value to set. + * @param category the category value to set. + * @param span the span value to set. + * @param startTimeMs the startTimeMs value to set. + * @param endTimeMs the endTimeMs value to set. + */ + @Generated + private AudioVisualContentSegment(String segmentId, String category, ContentSpan span, long startTimeMs, + long endTimeMs) { + this.segmentId = segmentId; + this.category = category; + this.span = span; + this.startTimeMs = startTimeMs; + this.endTimeMs = endTimeMs; + } + + /** + * Get the segmentId property: Segment identifier. + * + * @return the segmentId value. + */ + @Generated + public String getSegmentId() { + return this.segmentId; + } + + /** + * Get the category property: Classified content category. + * + * @return the category value. + */ + @Generated + public String getCategory() { + return this.category; + } + + /** + * Get the span property: Span of the segment in the markdown content. 
+ * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the startTimeMs property: Start time of the segment in milliseconds. + * + * @return the startTimeMs value. + */ + @Generated + public long getStartTimeMs() { + return this.startTimeMs; + } + + /** + * Get the endTimeMs property: End time of the segment in milliseconds. + * + * @return the endTimeMs value. + */ + @Generated + public long getEndTimeMs() { + return this.endTimeMs; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("segmentId", this.segmentId); + jsonWriter.writeStringField("category", this.category); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeLongField("startTimeMs", this.startTimeMs); + jsonWriter.writeLongField("endTimeMs", this.endTimeMs); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AudioVisualContentSegment from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AudioVisualContentSegment if the JsonReader was pointing to an instance of it, or null if + * it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the AudioVisualContentSegment. 
+ */ + @Generated + public static AudioVisualContentSegment fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String segmentId = null; + String category = null; + ContentSpan span = null; + long startTimeMs = 0L; + long endTimeMs = 0L; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("segmentId".equals(fieldName)) { + segmentId = reader.getString(); + } else if ("category".equals(fieldName)) { + category = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("startTimeMs".equals(fieldName)) { + startTimeMs = reader.getLong(); + } else if ("endTimeMs".equals(fieldName)) { + endTimeMs = reader.getLong(); + } else { + reader.skipChildren(); + } + } + return new AudioVisualContentSegment(segmentId, category, span, startTimeMs, endTimeMs); + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/BooleanField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/BooleanField.java new file mode 100644 index 000000000000..dbb5382aaad6 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/BooleanField.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Boolean field extracted from the content. 
+ */ +@Immutable +public final class BooleanField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.BOOLEAN; + + /* + * Boolean field value. + */ + @Generated + private Boolean valueBoolean; + + /** + * Creates an instance of BooleanField class. + */ + @Generated + private BooleanField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueBoolean property: Boolean field value. + * + * @return the valueBoolean value. + */ + @Generated + public Boolean isValueBoolean() { + return this.valueBoolean; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeBooleanField("valueBoolean", this.valueBoolean); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BooleanField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of BooleanField if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the BooleanField. 
+ */ + @Generated + public static BooleanField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BooleanField deserializedBooleanField = new BooleanField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedBooleanField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedBooleanField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedBooleanField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedBooleanField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueBoolean".equals(fieldName)) { + deserializedBooleanField.valueBoolean = reader.getNullable(JsonReader::getBoolean); + } else { + reader.skipChildren(); + } + } + + return deserializedBooleanField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ChartFormat.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ChartFormat.java new file mode 100644 index 000000000000..083e0cad1030 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ChartFormat.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Representation format of charts in analyze result markdown. 
+ */ +public final class ChartFormat extends ExpandableStringEnum { + /** + * Represent charts as Chart.js code blocks. + */ + @Generated + public static final ChartFormat CHART_JS = fromString("chartJs"); + + /** + * Represent charts as markdown tables. + */ + @Generated + public static final ChartFormat MARKDOWN = fromString("markdown"); + + /** + * Creates a new instance of ChartFormat value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public ChartFormat() { + } + + /** + * Creates or finds a ChartFormat from its string representation. + * + * @param name a name to look for. + * @return the corresponding ChartFormat. + */ + @Generated + public static ChartFormat fromString(String name) { + return fromString(name, ChartFormat.class); + } + + /** + * Gets known ChartFormat values. + * + * @return known ChartFormat values. + */ + @Generated + public static Collection values() { + return values(ChartFormat.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzer.java new file mode 100644 index 000000000000..557f9d9365a2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzer.java @@ -0,0 +1,603 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.core.annotation.Fluent; +import com.azure.core.annotation.Generated; +import com.azure.core.models.ResponseError; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Analyzer that extracts content and fields from multimodal documents. + */ +@Fluent +public final class ContentAnalyzer implements JsonSerializable { + /* + * The unique identifier of the analyzer. + */ + @Generated + private String analyzerId; + + /* + * A description of the analyzer. + */ + @Generated + private String description; + + /* + * Tags associated with the analyzer. + */ + @Generated + private Map tags; + + /* + * The status of the analyzer. + */ + @Generated + private ContentAnalyzerStatus status; + + /* + * The date and time when the analyzer was created. + */ + @Generated + private OffsetDateTime createdAt; + + /* + * The date and time when the analyzer was last modified. + */ + @Generated + private OffsetDateTime lastModifiedAt; + + /* + * Warnings encountered while creating the analyzer. + */ + @Generated + private List warnings; + + /* + * The analyzer to incrementally train from. + */ + @Generated + private String baseAnalyzerId; + + /* + * Analyzer configuration settings. + */ + @Generated + private ContentAnalyzerConfig config; + + /* + * The schema of fields to extracted. + */ + @Generated + private ContentFieldSchema fieldSchema; + + /* + * Indicates whether the result may contain additional fields outside of the defined schema. 
+ */ + @Generated + private Boolean dynamicFieldSchema; + + /* + * The location where the data may be processed. Defaults to global. + */ + @Generated + private ProcessingLocation processingLocation; + + /* + * Additional knowledge sources used to enhance the analyzer. + */ + @Generated + private List knowledgeSources; + + /* + * Mapping of model roles to specific model names. + * Ex. { "completion": "gpt-4.1", "embedding": "text-embedding-3-large" }. + */ + @Generated + private Map models; + + /* + * Chat completion and embedding models supported by the analyzer. + */ + @Generated + private SupportedModels supportedModels; + + /** + * Stores updated model property, the value is property name, not serialized name. + */ + @Generated + private final Set updatedProperties = new HashSet<>(); + + @Generated + private boolean jsonMergePatch; + + @Generated + private void serializeAsJsonMergePatch(boolean jsonMergePatch) { + this.jsonMergePatch = jsonMergePatch; + } + + static { + JsonMergePatchHelper.setContentAnalyzerAccessor(new JsonMergePatchHelper.ContentAnalyzerAccessor() { + @Override + public ContentAnalyzer prepareModelForJsonMergePatch(ContentAnalyzer model, boolean jsonMergePatchEnabled) { + model.serializeAsJsonMergePatch(jsonMergePatchEnabled); + return model; + } + + @Override + public boolean isJsonMergePatch(ContentAnalyzer model) { + return model.jsonMergePatch; + } + }); + } + + /** + * Creates an instance of ContentAnalyzer class. + */ + @Generated + public ContentAnalyzer() { + } + + /** + * Get the analyzerId property: The unique identifier of the analyzer. + * + * @return the analyzerId value. + */ + @Generated + public String getAnalyzerId() { + return this.analyzerId; + } + + /** + * Get the description property: A description of the analyzer. + * + * @return the description value. + */ + @Generated + public String getDescription() { + return this.description; + } + + /** + * Set the description property: A description of the analyzer. 
+ * + * @param description the description value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setDescription(String description) { + this.description = description; + this.updatedProperties.add("description"); + return this; + } + + /** + * Get the tags property: Tags associated with the analyzer. + * + * @return the tags value. + */ + @Generated + public Map getTags() { + return this.tags; + } + + /** + * Set the tags property: Tags associated with the analyzer. + * + * @param tags the tags value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setTags(Map tags) { + this.tags = tags; + this.updatedProperties.add("tags"); + return this; + } + + /** + * Get the status property: The status of the analyzer. + * + * @return the status value. + */ + @Generated + public ContentAnalyzerStatus getStatus() { + return this.status; + } + + /** + * Get the createdAt property: The date and time when the analyzer was created. + * + * @return the createdAt value. + */ + @Generated + public OffsetDateTime getCreatedAt() { + return this.createdAt; + } + + /** + * Get the lastModifiedAt property: The date and time when the analyzer was last modified. + * + * @return the lastModifiedAt value. + */ + @Generated + public OffsetDateTime getLastModifiedAt() { + return this.lastModifiedAt; + } + + /** + * Get the warnings property: Warnings encountered while creating the analyzer. + * + * @return the warnings value. + */ + @Generated + public List getWarnings() { + return this.warnings; + } + + /** + * Get the baseAnalyzerId property: The analyzer to incrementally train from. + * + * @return the baseAnalyzerId value. + */ + @Generated + public String getBaseAnalyzerId() { + return this.baseAnalyzerId; + } + + /** + * Set the baseAnalyzerId property: The analyzer to incrementally train from. + * + * @param baseAnalyzerId the baseAnalyzerId value to set. 
+ * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setBaseAnalyzerId(String baseAnalyzerId) { + this.baseAnalyzerId = baseAnalyzerId; + this.updatedProperties.add("baseAnalyzerId"); + return this; + } + + /** + * Get the config property: Analyzer configuration settings. + * + * @return the config value. + */ + @Generated + public ContentAnalyzerConfig getConfig() { + return this.config; + } + + /** + * Set the config property: Analyzer configuration settings. + * + * @param config the config value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setConfig(ContentAnalyzerConfig config) { + this.config = config; + this.updatedProperties.add("config"); + return this; + } + + /** + * Get the fieldSchema property: The schema of fields to extracted. + * + * @return the fieldSchema value. + */ + @Generated + public ContentFieldSchema getFieldSchema() { + return this.fieldSchema; + } + + /** + * Set the fieldSchema property: The schema of fields to extracted. + * + * @param fieldSchema the fieldSchema value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setFieldSchema(ContentFieldSchema fieldSchema) { + this.fieldSchema = fieldSchema; + this.updatedProperties.add("fieldSchema"); + return this; + } + + /** + * Get the dynamicFieldSchema property: Indicates whether the result may contain additional fields outside of the + * defined schema. + * + * @return the dynamicFieldSchema value. + */ + @Generated + public Boolean isDynamicFieldSchema() { + return this.dynamicFieldSchema; + } + + /** + * Set the dynamicFieldSchema property: Indicates whether the result may contain additional fields outside of the + * defined schema. + * + * @param dynamicFieldSchema the dynamicFieldSchema value to set. + * @return the ContentAnalyzer object itself. 
+ */ + @Generated + public ContentAnalyzer setDynamicFieldSchema(Boolean dynamicFieldSchema) { + this.dynamicFieldSchema = dynamicFieldSchema; + this.updatedProperties.add("dynamicFieldSchema"); + return this; + } + + /** + * Get the processingLocation property: The location where the data may be processed. Defaults to global. + * + * @return the processingLocation value. + */ + @Generated + public ProcessingLocation getProcessingLocation() { + return this.processingLocation; + } + + /** + * Set the processingLocation property: The location where the data may be processed. Defaults to global. + * + * @param processingLocation the processingLocation value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setProcessingLocation(ProcessingLocation processingLocation) { + this.processingLocation = processingLocation; + this.updatedProperties.add("processingLocation"); + return this; + } + + /** + * Get the knowledgeSources property: Additional knowledge sources used to enhance the analyzer. + * + * @return the knowledgeSources value. + */ + @Generated + public List getKnowledgeSources() { + return this.knowledgeSources; + } + + /** + * Set the knowledgeSources property: Additional knowledge sources used to enhance the analyzer. + * + * @param knowledgeSources the knowledgeSources value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setKnowledgeSources(List knowledgeSources) { + this.knowledgeSources = knowledgeSources; + this.updatedProperties.add("knowledgeSources"); + return this; + } + + /** + * Get the models property: Mapping of model roles to specific model names. + * Ex. { "completion": "gpt-4.1", "embedding": "text-embedding-3-large" }. + * + * @return the models value. + */ + @Generated + public Map getModels() { + return this.models; + } + + /** + * Set the models property: Mapping of model roles to specific model names. + * Ex. 
{ "completion": "gpt-4.1", "embedding": "text-embedding-3-large" }. + * + * @param models the models value to set. + * @return the ContentAnalyzer object itself. + */ + @Generated + public ContentAnalyzer setModels(Map models) { + this.models = models; + this.updatedProperties.add("models"); + return this; + } + + /** + * Get the supportedModels property: Chat completion and embedding models supported by the analyzer. + * + * @return the supportedModels value. + */ + @Generated + public SupportedModels getSupportedModels() { + return this.supportedModels; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + if (jsonMergePatch) { + return toJsonMergePatch(jsonWriter); + } else { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("description", this.description); + jsonWriter.writeMapField("tags", this.tags, (writer, element) -> writer.writeString(element)); + jsonWriter.writeStringField("baseAnalyzerId", this.baseAnalyzerId); + jsonWriter.writeJsonField("config", this.config); + jsonWriter.writeJsonField("fieldSchema", this.fieldSchema); + jsonWriter.writeBooleanField("dynamicFieldSchema", this.dynamicFieldSchema); + jsonWriter.writeStringField("processingLocation", + this.processingLocation == null ? 
null : this.processingLocation.toString()); + jsonWriter.writeArrayField("knowledgeSources", this.knowledgeSources, + (writer, element) -> writer.writeJson(element)); + jsonWriter.writeMapField("models", this.models, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + } + + @Generated + private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + if (updatedProperties.contains("description")) { + if (this.description == null) { + jsonWriter.writeNullField("description"); + } else { + jsonWriter.writeStringField("description", this.description); + } + } + if (updatedProperties.contains("tags")) { + if (this.tags == null) { + jsonWriter.writeNullField("tags"); + } else { + jsonWriter.writeMapField("tags", this.tags, (writer, element) -> { + if (element != null) { + writer.writeString(element); + } else { + writer.writeNull(); + } + }); + } + } + if (updatedProperties.contains("baseAnalyzerId")) { + if (this.baseAnalyzerId == null) { + jsonWriter.writeNullField("baseAnalyzerId"); + } else { + jsonWriter.writeStringField("baseAnalyzerId", this.baseAnalyzerId); + } + } + if (updatedProperties.contains("config")) { + if (this.config == null) { + jsonWriter.writeNullField("config"); + } else { + JsonMergePatchHelper.getContentAnalyzerConfigAccessor() + .prepareModelForJsonMergePatch(this.config, true); + jsonWriter.writeJsonField("config", this.config); + JsonMergePatchHelper.getContentAnalyzerConfigAccessor() + .prepareModelForJsonMergePatch(this.config, false); + } + } + if (updatedProperties.contains("fieldSchema")) { + if (this.fieldSchema == null) { + jsonWriter.writeNullField("fieldSchema"); + } else { + JsonMergePatchHelper.getContentFieldSchemaAccessor() + .prepareModelForJsonMergePatch(this.fieldSchema, true); + jsonWriter.writeJsonField("fieldSchema", this.fieldSchema); + JsonMergePatchHelper.getContentFieldSchemaAccessor() + 
.prepareModelForJsonMergePatch(this.fieldSchema, false); + } + } + if (updatedProperties.contains("dynamicFieldSchema")) { + if (this.dynamicFieldSchema == null) { + jsonWriter.writeNullField("dynamicFieldSchema"); + } else { + jsonWriter.writeBooleanField("dynamicFieldSchema", this.dynamicFieldSchema); + } + } + if (updatedProperties.contains("processingLocation")) { + if (this.processingLocation == null) { + jsonWriter.writeNullField("processingLocation"); + } else { + jsonWriter.writeStringField("processingLocation", this.processingLocation.toString()); + } + } + if (updatedProperties.contains("knowledgeSources")) { + if (this.knowledgeSources == null) { + jsonWriter.writeNullField("knowledgeSources"); + } else { + jsonWriter.writeArrayField("knowledgeSources", this.knowledgeSources, + (writer, element) -> writer.writeJson(element)); + } + } + if (updatedProperties.contains("models")) { + if (this.models == null) { + jsonWriter.writeNullField("models"); + } else { + jsonWriter.writeMapField("models", this.models, (writer, element) -> { + if (element != null) { + writer.writeString(element); + } else { + writer.writeNull(); + } + }); + } + } + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentAnalyzer from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentAnalyzer if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the ContentAnalyzer. 
+ */ + @Generated + public static ContentAnalyzer fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ContentAnalyzer deserializedContentAnalyzer = new ContentAnalyzer(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("analyzerId".equals(fieldName)) { + deserializedContentAnalyzer.analyzerId = reader.getString(); + } else if ("status".equals(fieldName)) { + deserializedContentAnalyzer.status = ContentAnalyzerStatus.fromString(reader.getString()); + } else if ("createdAt".equals(fieldName)) { + deserializedContentAnalyzer.createdAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("lastModifiedAt".equals(fieldName)) { + deserializedContentAnalyzer.lastModifiedAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("description".equals(fieldName)) { + deserializedContentAnalyzer.description = reader.getString(); + } else if ("tags".equals(fieldName)) { + Map tags = reader.readMap(reader1 -> reader1.getString()); + deserializedContentAnalyzer.tags = tags; + } else if ("warnings".equals(fieldName)) { + List warnings = reader.readArray(reader1 -> ResponseError.fromJson(reader1)); + deserializedContentAnalyzer.warnings = warnings; + } else if ("baseAnalyzerId".equals(fieldName)) { + deserializedContentAnalyzer.baseAnalyzerId = reader.getString(); + } else if ("config".equals(fieldName)) { + deserializedContentAnalyzer.config = ContentAnalyzerConfig.fromJson(reader); + } else if ("fieldSchema".equals(fieldName)) { + deserializedContentAnalyzer.fieldSchema = ContentFieldSchema.fromJson(reader); + } else if ("dynamicFieldSchema".equals(fieldName)) { + deserializedContentAnalyzer.dynamicFieldSchema = reader.getNullable(JsonReader::getBoolean); + } else if ("processingLocation".equals(fieldName)) { + 
deserializedContentAnalyzer.processingLocation = ProcessingLocation.fromString(reader.getString()); + } else if ("knowledgeSources".equals(fieldName)) { + List knowledgeSources + = reader.readArray(reader1 -> KnowledgeSource.fromJson(reader1)); + deserializedContentAnalyzer.knowledgeSources = knowledgeSources; + } else if ("models".equals(fieldName)) { + Map models = reader.readMap(reader1 -> reader1.getString()); + deserializedContentAnalyzer.models = models; + } else if ("supportedModels".equals(fieldName)) { + deserializedContentAnalyzer.supportedModels = SupportedModels.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedContentAnalyzer; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerAnalyzeOperationStatus.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerAnalyzeOperationStatus.java new file mode 100644 index 000000000000..285fdac5cd7d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerAnalyzeOperationStatus.java @@ -0,0 +1,202 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.ContentAnalyzerAnalyzeOperationStatusHelper; +import com.azure.core.annotation.Generated; +import com.azure.core.models.ResponseError; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Provides status details for analyze operations. 
+ */ +public final class ContentAnalyzerAnalyzeOperationStatus + implements JsonSerializable { + + static { + ContentAnalyzerAnalyzeOperationStatusHelper.setAccessor( + new ContentAnalyzerAnalyzeOperationStatusHelper.ContentAnalyzerAnalyzeOperationStatusAccessor() { + + @Override + public void setOperationId(ContentAnalyzerAnalyzeOperationStatus status, String operationId) { + status.setOperationId(operationId); + } + }); + } + + /* + * The unique ID of the operation. + */ + @Generated + private final String id; + + /* + * The status of the operation + */ + @Generated + private final OperationState status; + + /* + * Error object that describes the error when status is "Failed". + */ + @Generated + private ResponseError error; + + /* + * The result of the operation. + */ + @Generated + private AnalyzeResult result; + + /* + * Usage details of the analyze operation. + */ + @Generated + private UsageDetails usage; + + /** + * Creates an instance of ContentAnalyzerAnalyzeOperationStatus class. + * + * @param id the id value to set. + * @param status the status value to set. + */ + @Generated + private ContentAnalyzerAnalyzeOperationStatus(String id, OperationState status) { + this.id = id; + this.status = status; + } + + /** + * Get the id property: The unique ID of the operation. + * + * @return the id value. + */ + @Generated + public String getId() { + return this.id; + } + + /** + * Get the status property: The status of the operation. + * + * @return the status value. + */ + @Generated + public OperationState getStatus() { + return this.status; + } + + /** + * Get the error property: Error object that describes the error when status is "Failed". + * + * @return the error value. + */ + @Generated + public ResponseError getError() { + return this.error; + } + + /** + * Get the result property: The result of the operation. + * + * @return the result value. 
+ */ + @Generated + public AnalyzeResult getResult() { + return this.result; + } + + /** + * Get the usage property: Usage details of the analyze operation. + * + * @return the usage value. + */ + @Generated + public UsageDetails getUsage() { + return this.usage; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("id", this.id); + jsonWriter.writeStringField("status", this.status == null ? null : this.status.toString()); + jsonWriter.writeJsonField("error", this.error); + jsonWriter.writeJsonField("result", this.result); + jsonWriter.writeJsonField("usage", this.usage); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentAnalyzerAnalyzeOperationStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentAnalyzerAnalyzeOperationStatus if the JsonReader was pointing to an instance of it, + * or null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the ContentAnalyzerAnalyzeOperationStatus. 
+ */ + @Generated + public static ContentAnalyzerAnalyzeOperationStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String id = null; + OperationState status = null; + ResponseError error = null; + AnalyzeResult result = null; + UsageDetails usage = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + if ("id".equals(fieldName)) { + id = reader.getString(); + } else if ("status".equals(fieldName)) { + status = OperationState.fromString(reader.getString()); + } else if ("error".equals(fieldName)) { + error = ResponseError.fromJson(reader); + } else if ("result".equals(fieldName)) { + result = AnalyzeResult.fromJson(reader); + } else if ("usage".equals(fieldName)) { + usage = UsageDetails.fromJson(reader); + } else { + reader.skipChildren(); + } + } + ContentAnalyzerAnalyzeOperationStatus deserializedContentAnalyzerAnalyzeOperationStatus + = new ContentAnalyzerAnalyzeOperationStatus(id, status); + deserializedContentAnalyzerAnalyzeOperationStatus.error = error; + deserializedContentAnalyzerAnalyzeOperationStatus.result = result; + deserializedContentAnalyzerAnalyzeOperationStatus.usage = usage; + return deserializedContentAnalyzerAnalyzeOperationStatus; + }); + } + + private String operationId; + + /** + * Gets the operationId property: The unique ID of the analyze operation. Use this ID with getResultFile() and + * deleteResult() methods. + * + * @return the operationId value. + */ + public String getOperationId() { + return operationId; + } + + /** + * Sets the operationId property: The unique ID of the analyze operation. + * + * @param operationId the operationId value to set. 
+ */ + private void setOperationId(String operationId) { + this.operationId = operationId; + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerConfig.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerConfig.java new file mode 100644 index 000000000000..0fba6d3a8705 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerConfig.java @@ -0,0 +1,753 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.core.annotation.Fluent; +import com.azure.core.annotation.Generated; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Configuration settings for an analyzer. + */ +@Fluent +public final class ContentAnalyzerConfig implements JsonSerializable { + /* + * Return all content details. + */ + @Generated + private Boolean returnDetails; + + /* + * List of locale hints for speech transcription. + */ + @Generated + private List locales; + + /* + * Enable optical character recognition (OCR). + */ + @Generated + private Boolean enableOcr; + + /* + * Enable layout analysis. + */ + @Generated + private Boolean enableLayout; + + /* + * Enable generation of figure description. + */ + @Generated + private Boolean enableFigureDescription; + + /* + * Enable analysis of figures, such as charts and diagrams. 
+ */ + @Generated + private Boolean enableFigureAnalysis; + + /* + * Enable mathematical formula detection. + */ + @Generated + private Boolean enableFormula; + + /* + * Representation format of tables in analyze result markdown. + */ + @Generated + private TableFormat tableFormat; + + /* + * Representation format of charts in analyze result markdown. + */ + @Generated + private ChartFormat chartFormat; + + /* + * Representation format of annotations in analyze result markdown. + */ + @Generated + private AnnotationFormat annotationFormat; + + /* + * Disable the default blurring of faces for privacy while processing the content. + */ + @Generated + private Boolean disableFaceBlurring; + + /* + * Return field grounding source and confidence. + */ + @Generated + private Boolean estimateFieldSourceAndConfidence; + + /* + * Map of categories to classify the input content(s) against. + */ + @Generated + private Map<String, ContentCategoryDefinition> contentCategories; + + /* + * Enable segmentation of the input by contentCategories. + */ + @Generated + private Boolean enableSegment; + + /* + * Force segmentation of document content by page. + */ + @Generated + private Boolean segmentPerPage; + + /* + * Omit the content for this analyzer from analyze result. + * Only return content(s) from additional analyzers specified in contentCategories, if any. + */ + @Generated + private Boolean omitContent; + + /** + * Stores updated model property, the value is property name, not serialized name. 
+ */ + @Generated + private final Set<String> updatedProperties = new HashSet<>(); + + @Generated + private boolean jsonMergePatch; + + @Generated + private void serializeAsJsonMergePatch(boolean jsonMergePatch) { + this.jsonMergePatch = jsonMergePatch; + } + + static { + JsonMergePatchHelper.setContentAnalyzerConfigAccessor(new JsonMergePatchHelper.ContentAnalyzerConfigAccessor() { + @Override + public ContentAnalyzerConfig prepareModelForJsonMergePatch(ContentAnalyzerConfig model, + boolean jsonMergePatchEnabled) { + model.serializeAsJsonMergePatch(jsonMergePatchEnabled); + return model; + } + + @Override + public boolean isJsonMergePatch(ContentAnalyzerConfig model) { + return model.jsonMergePatch; + } + }); + } + + /** + * Creates an instance of ContentAnalyzerConfig class. + */ + @Generated + public ContentAnalyzerConfig() { + } + + /** + * Get the returnDetails property: Return all content details. + * + * @return the returnDetails value. + */ + @Generated + public Boolean isReturnDetails() { + return this.returnDetails; + } + + /** + * Set the returnDetails property: Return all content details. + * + * @param returnDetails the returnDetails value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setReturnDetails(Boolean returnDetails) { + this.returnDetails = returnDetails; + this.updatedProperties.add("returnDetails"); + return this; + } + + /** + * Get the locales property: List of locale hints for speech transcription. + * + * @return the locales value. + */ + @Generated + public List<String> getLocales() { + return this.locales; + } + + /** + * Set the locales property: List of locale hints for speech transcription. + * + * @param locales the locales value to set. + * @return the ContentAnalyzerConfig object itself. 
+ */ + @Generated + public ContentAnalyzerConfig setLocales(List locales) { + this.locales = locales; + this.updatedProperties.add("locales"); + return this; + } + + /** + * Get the enableOcr property: Enable optical character recognition (OCR). + * + * @return the enableOcr value. + */ + @Generated + public Boolean isEnableOcr() { + return this.enableOcr; + } + + /** + * Set the enableOcr property: Enable optical character recognition (OCR). + * + * @param enableOcr the enableOcr value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEnableOcr(Boolean enableOcr) { + this.enableOcr = enableOcr; + this.updatedProperties.add("enableOcr"); + return this; + } + + /** + * Get the enableLayout property: Enable layout analysis. + * + * @return the enableLayout value. + */ + @Generated + public Boolean isEnableLayout() { + return this.enableLayout; + } + + /** + * Set the enableLayout property: Enable layout analysis. + * + * @param enableLayout the enableLayout value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEnableLayout(Boolean enableLayout) { + this.enableLayout = enableLayout; + this.updatedProperties.add("enableLayout"); + return this; + } + + /** + * Get the enableFigureDescription property: Enable generation of figure description. + * + * @return the enableFigureDescription value. + */ + @Generated + public Boolean isEnableFigureDescription() { + return this.enableFigureDescription; + } + + /** + * Set the enableFigureDescription property: Enable generation of figure description. + * + * @param enableFigureDescription the enableFigureDescription value to set. + * @return the ContentAnalyzerConfig object itself. 
+ */ + @Generated + public ContentAnalyzerConfig setEnableFigureDescription(Boolean enableFigureDescription) { + this.enableFigureDescription = enableFigureDescription; + this.updatedProperties.add("enableFigureDescription"); + return this; + } + + /** + * Get the enableFigureAnalysis property: Enable analysis of figures, such as charts and diagrams. + * + * @return the enableFigureAnalysis value. + */ + @Generated + public Boolean isEnableFigureAnalysis() { + return this.enableFigureAnalysis; + } + + /** + * Set the enableFigureAnalysis property: Enable analysis of figures, such as charts and diagrams. + * + * @param enableFigureAnalysis the enableFigureAnalysis value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEnableFigureAnalysis(Boolean enableFigureAnalysis) { + this.enableFigureAnalysis = enableFigureAnalysis; + this.updatedProperties.add("enableFigureAnalysis"); + return this; + } + + /** + * Get the enableFormula property: Enable mathematical formula detection. + * + * @return the enableFormula value. + */ + @Generated + public Boolean isEnableFormula() { + return this.enableFormula; + } + + /** + * Set the enableFormula property: Enable mathematical formula detection. + * + * @param enableFormula the enableFormula value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEnableFormula(Boolean enableFormula) { + this.enableFormula = enableFormula; + this.updatedProperties.add("enableFormula"); + return this; + } + + /** + * Get the tableFormat property: Representation format of tables in analyze result markdown. + * + * @return the tableFormat value. + */ + @Generated + public TableFormat getTableFormat() { + return this.tableFormat; + } + + /** + * Set the tableFormat property: Representation format of tables in analyze result markdown. + * + * @param tableFormat the tableFormat value to set. 
+ * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setTableFormat(TableFormat tableFormat) { + this.tableFormat = tableFormat; + this.updatedProperties.add("tableFormat"); + return this; + } + + /** + * Get the chartFormat property: Representation format of charts in analyze result markdown. + * + * @return the chartFormat value. + */ + @Generated + public ChartFormat getChartFormat() { + return this.chartFormat; + } + + /** + * Set the chartFormat property: Representation format of charts in analyze result markdown. + * + * @param chartFormat the chartFormat value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setChartFormat(ChartFormat chartFormat) { + this.chartFormat = chartFormat; + this.updatedProperties.add("chartFormat"); + return this; + } + + /** + * Get the annotationFormat property: Representation format of annotations in analyze result markdown. + * + * @return the annotationFormat value. + */ + @Generated + public AnnotationFormat getAnnotationFormat() { + return this.annotationFormat; + } + + /** + * Set the annotationFormat property: Representation format of annotations in analyze result markdown. + * + * @param annotationFormat the annotationFormat value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setAnnotationFormat(AnnotationFormat annotationFormat) { + this.annotationFormat = annotationFormat; + this.updatedProperties.add("annotationFormat"); + return this; + } + + /** + * Get the disableFaceBlurring property: Disable the default blurring of faces for privacy while processing the + * content. + * + * @return the disableFaceBlurring value. + */ + @Generated + public Boolean isDisableFaceBlurring() { + return this.disableFaceBlurring; + } + + /** + * Set the disableFaceBlurring property: Disable the default blurring of faces for privacy while processing the + * content. 
+ * + * @param disableFaceBlurring the disableFaceBlurring value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setDisableFaceBlurring(Boolean disableFaceBlurring) { + this.disableFaceBlurring = disableFaceBlurring; + this.updatedProperties.add("disableFaceBlurring"); + return this; + } + + /** + * Get the estimateFieldSourceAndConfidence property: Return field grounding source and confidence. + * + * @return the estimateFieldSourceAndConfidence value. + */ + @Generated + public Boolean isEstimateFieldSourceAndConfidence() { + return this.estimateFieldSourceAndConfidence; + } + + /** + * Set the estimateFieldSourceAndConfidence property: Return field grounding source and confidence. + * + * @param estimateFieldSourceAndConfidence the estimateFieldSourceAndConfidence value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEstimateFieldSourceAndConfidence(Boolean estimateFieldSourceAndConfidence) { + this.estimateFieldSourceAndConfidence = estimateFieldSourceAndConfidence; + this.updatedProperties.add("estimateFieldSourceAndConfidence"); + return this; + } + + /** + * Get the contentCategories property: Map of categories to classify the input content(s) against. + * + * @return the contentCategories value. + */ + @Generated + public Map getContentCategories() { + return this.contentCategories; + } + + /** + * Set the contentCategories property: Map of categories to classify the input content(s) against. + * + * @param contentCategories the contentCategories value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setContentCategories(Map contentCategories) { + this.contentCategories = contentCategories; + this.updatedProperties.add("contentCategories"); + return this; + } + + /** + * Get the enableSegment property: Enable segmentation of the input by contentCategories. 
+ * + * @return the enableSegment value. + */ + @Generated + public Boolean isEnableSegment() { + return this.enableSegment; + } + + /** + * Set the enableSegment property: Enable segmentation of the input by contentCategories. + * + * @param enableSegment the enableSegment value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setEnableSegment(Boolean enableSegment) { + this.enableSegment = enableSegment; + this.updatedProperties.add("enableSegment"); + return this; + } + + /** + * Get the segmentPerPage property: Force segmentation of document content by page. + * + * @return the segmentPerPage value. + */ + @Generated + public Boolean isSegmentPerPage() { + return this.segmentPerPage; + } + + /** + * Set the segmentPerPage property: Force segmentation of document content by page. + * + * @param segmentPerPage the segmentPerPage value to set. + * @return the ContentAnalyzerConfig object itself. + */ + @Generated + public ContentAnalyzerConfig setSegmentPerPage(Boolean segmentPerPage) { + this.segmentPerPage = segmentPerPage; + this.updatedProperties.add("segmentPerPage"); + return this; + } + + /** + * Get the omitContent property: Omit the content for this analyzer from analyze result. + * Only return content(s) from additional analyzers specified in contentCategories, if any. + * + * @return the omitContent value. + */ + @Generated + public Boolean isOmitContent() { + return this.omitContent; + } + + /** + * Set the omitContent property: Omit the content for this analyzer from analyze result. + * Only return content(s) from additional analyzers specified in contentCategories, if any. + * + * @param omitContent the omitContent value to set. + * @return the ContentAnalyzerConfig object itself. 
+ */ + @Generated + public ContentAnalyzerConfig setOmitContent(Boolean omitContent) { + this.omitContent = omitContent; + this.updatedProperties.add("omitContent"); + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + if (jsonMergePatch) { + return toJsonMergePatch(jsonWriter); + } else { + jsonWriter.writeStartObject(); + jsonWriter.writeBooleanField("returnDetails", this.returnDetails); + jsonWriter.writeArrayField("locales", this.locales, (writer, element) -> writer.writeString(element)); + jsonWriter.writeBooleanField("enableOcr", this.enableOcr); + jsonWriter.writeBooleanField("enableLayout", this.enableLayout); + jsonWriter.writeBooleanField("enableFigureDescription", this.enableFigureDescription); + jsonWriter.writeBooleanField("enableFigureAnalysis", this.enableFigureAnalysis); + jsonWriter.writeBooleanField("enableFormula", this.enableFormula); + jsonWriter.writeStringField("tableFormat", this.tableFormat == null ? null : this.tableFormat.toString()); + jsonWriter.writeStringField("chartFormat", this.chartFormat == null ? null : this.chartFormat.toString()); + jsonWriter.writeStringField("annotationFormat", + this.annotationFormat == null ? 
null : this.annotationFormat.toString()); + jsonWriter.writeBooleanField("disableFaceBlurring", this.disableFaceBlurring); + jsonWriter.writeBooleanField("estimateFieldSourceAndConfidence", this.estimateFieldSourceAndConfidence); + jsonWriter.writeMapField("contentCategories", this.contentCategories, + (writer, element) -> writer.writeJson(element)); + jsonWriter.writeBooleanField("enableSegment", this.enableSegment); + jsonWriter.writeBooleanField("segmentPerPage", this.segmentPerPage); + jsonWriter.writeBooleanField("omitContent", this.omitContent); + return jsonWriter.writeEndObject(); + } + } + + @Generated + private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + if (updatedProperties.contains("returnDetails")) { + if (this.returnDetails == null) { + jsonWriter.writeNullField("returnDetails"); + } else { + jsonWriter.writeBooleanField("returnDetails", this.returnDetails); + } + } + if (updatedProperties.contains("locales")) { + if (this.locales == null) { + jsonWriter.writeNullField("locales"); + } else { + jsonWriter.writeArrayField("locales", this.locales, (writer, element) -> writer.writeString(element)); + } + } + if (updatedProperties.contains("enableOcr")) { + if (this.enableOcr == null) { + jsonWriter.writeNullField("enableOcr"); + } else { + jsonWriter.writeBooleanField("enableOcr", this.enableOcr); + } + } + if (updatedProperties.contains("enableLayout")) { + if (this.enableLayout == null) { + jsonWriter.writeNullField("enableLayout"); + } else { + jsonWriter.writeBooleanField("enableLayout", this.enableLayout); + } + } + if (updatedProperties.contains("enableFigureDescription")) { + if (this.enableFigureDescription == null) { + jsonWriter.writeNullField("enableFigureDescription"); + } else { + jsonWriter.writeBooleanField("enableFigureDescription", this.enableFigureDescription); + } + } + if (updatedProperties.contains("enableFigureAnalysis")) { + if (this.enableFigureAnalysis == null) { + 
jsonWriter.writeNullField("enableFigureAnalysis"); + } else { + jsonWriter.writeBooleanField("enableFigureAnalysis", this.enableFigureAnalysis); + } + } + if (updatedProperties.contains("enableFormula")) { + if (this.enableFormula == null) { + jsonWriter.writeNullField("enableFormula"); + } else { + jsonWriter.writeBooleanField("enableFormula", this.enableFormula); + } + } + if (updatedProperties.contains("tableFormat")) { + if (this.tableFormat == null) { + jsonWriter.writeNullField("tableFormat"); + } else { + jsonWriter.writeStringField("tableFormat", this.tableFormat.toString()); + } + } + if (updatedProperties.contains("chartFormat")) { + if (this.chartFormat == null) { + jsonWriter.writeNullField("chartFormat"); + } else { + jsonWriter.writeStringField("chartFormat", this.chartFormat.toString()); + } + } + if (updatedProperties.contains("annotationFormat")) { + if (this.annotationFormat == null) { + jsonWriter.writeNullField("annotationFormat"); + } else { + jsonWriter.writeStringField("annotationFormat", this.annotationFormat.toString()); + } + } + if (updatedProperties.contains("disableFaceBlurring")) { + if (this.disableFaceBlurring == null) { + jsonWriter.writeNullField("disableFaceBlurring"); + } else { + jsonWriter.writeBooleanField("disableFaceBlurring", this.disableFaceBlurring); + } + } + if (updatedProperties.contains("estimateFieldSourceAndConfidence")) { + if (this.estimateFieldSourceAndConfidence == null) { + jsonWriter.writeNullField("estimateFieldSourceAndConfidence"); + } else { + jsonWriter.writeBooleanField("estimateFieldSourceAndConfidence", this.estimateFieldSourceAndConfidence); + } + } + if (updatedProperties.contains("contentCategories")) { + if (this.contentCategories == null) { + jsonWriter.writeNullField("contentCategories"); + } else { + jsonWriter.writeMapField("contentCategories", this.contentCategories, (writer, element) -> { + if (element != null) { + JsonMergePatchHelper.getContentCategoryDefinitionAccessor() + 
.prepareModelForJsonMergePatch(element, true); + writer.writeJson(element); + JsonMergePatchHelper.getContentCategoryDefinitionAccessor() + .prepareModelForJsonMergePatch(element, false); + } else { + writer.writeNull(); + } + }); + } + } + if (updatedProperties.contains("enableSegment")) { + if (this.enableSegment == null) { + jsonWriter.writeNullField("enableSegment"); + } else { + jsonWriter.writeBooleanField("enableSegment", this.enableSegment); + } + } + if (updatedProperties.contains("segmentPerPage")) { + if (this.segmentPerPage == null) { + jsonWriter.writeNullField("segmentPerPage"); + } else { + jsonWriter.writeBooleanField("segmentPerPage", this.segmentPerPage); + } + } + if (updatedProperties.contains("omitContent")) { + if (this.omitContent == null) { + jsonWriter.writeNullField("omitContent"); + } else { + jsonWriter.writeBooleanField("omitContent", this.omitContent); + } + } + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentAnalyzerConfig from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentAnalyzerConfig if the JsonReader was pointing to an instance of it, or null if it + * was pointing to JSON null. + * @throws IOException If an error occurs while reading the ContentAnalyzerConfig. 
+ */ + @Generated + public static ContentAnalyzerConfig fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ContentAnalyzerConfig deserializedContentAnalyzerConfig = new ContentAnalyzerConfig(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("returnDetails".equals(fieldName)) { + deserializedContentAnalyzerConfig.returnDetails = reader.getNullable(JsonReader::getBoolean); + } else if ("locales".equals(fieldName)) { + List locales = reader.readArray(reader1 -> reader1.getString()); + deserializedContentAnalyzerConfig.locales = locales; + } else if ("enableOcr".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableOcr = reader.getNullable(JsonReader::getBoolean); + } else if ("enableLayout".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableLayout = reader.getNullable(JsonReader::getBoolean); + } else if ("enableFigureDescription".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableFigureDescription + = reader.getNullable(JsonReader::getBoolean); + } else if ("enableFigureAnalysis".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableFigureAnalysis = reader.getNullable(JsonReader::getBoolean); + } else if ("enableFormula".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableFormula = reader.getNullable(JsonReader::getBoolean); + } else if ("tableFormat".equals(fieldName)) { + deserializedContentAnalyzerConfig.tableFormat = TableFormat.fromString(reader.getString()); + } else if ("chartFormat".equals(fieldName)) { + deserializedContentAnalyzerConfig.chartFormat = ChartFormat.fromString(reader.getString()); + } else if ("annotationFormat".equals(fieldName)) { + deserializedContentAnalyzerConfig.annotationFormat + = AnnotationFormat.fromString(reader.getString()); + } else if ("disableFaceBlurring".equals(fieldName)) { + deserializedContentAnalyzerConfig.disableFaceBlurring = 
reader.getNullable(JsonReader::getBoolean); + } else if ("estimateFieldSourceAndConfidence".equals(fieldName)) { + deserializedContentAnalyzerConfig.estimateFieldSourceAndConfidence + = reader.getNullable(JsonReader::getBoolean); + } else if ("contentCategories".equals(fieldName)) { + Map contentCategories + = reader.readMap(reader1 -> ContentCategoryDefinition.fromJson(reader1)); + deserializedContentAnalyzerConfig.contentCategories = contentCategories; + } else if ("enableSegment".equals(fieldName)) { + deserializedContentAnalyzerConfig.enableSegment = reader.getNullable(JsonReader::getBoolean); + } else if ("segmentPerPage".equals(fieldName)) { + deserializedContentAnalyzerConfig.segmentPerPage = reader.getNullable(JsonReader::getBoolean); + } else if ("omitContent".equals(fieldName)) { + deserializedContentAnalyzerConfig.omitContent = reader.getNullable(JsonReader::getBoolean); + } else { + reader.skipChildren(); + } + } + + return deserializedContentAnalyzerConfig; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerOperationStatus.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerOperationStatus.java new file mode 100644 index 000000000000..141e8c11d4e6 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerOperationStatus.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.models.ResponseError; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Provides status details for analyzer creation operations. + */ +@Immutable +public final class ContentAnalyzerOperationStatus implements JsonSerializable { + /* + * The unique ID of the operation. + */ + @Generated + private final String id; + + /* + * The status of the operation + */ + @Generated + private final OperationState status; + + /* + * Error object that describes the error when status is "Failed". + */ + @Generated + private ResponseError error; + + /* + * The result of the operation. + */ + @Generated + private ContentAnalyzer result; + + /* + * Usage details of the analyzer creation operation. + */ + @Generated + private UsageDetails usage; + + /** + * Creates an instance of ContentAnalyzerOperationStatus class. + * + * @param id the id value to set. + * @param status the status value to set. + */ + @Generated + private ContentAnalyzerOperationStatus(String id, OperationState status) { + this.id = id; + this.status = status; + } + + /** + * Get the id property: The unique ID of the operation. + * + * @return the id value. + */ + @Generated + public String getId() { + return this.id; + } + + /** + * Get the status property: The status of the operation. + * + * @return the status value. + */ + @Generated + public OperationState getStatus() { + return this.status; + } + + /** + * Get the error property: Error object that describes the error when status is "Failed". + * + * @return the error value. + */ + @Generated + public ResponseError getError() { + return this.error; + } + + /** + * Get the result property: The result of the operation. + * + * @return the result value. 
+ */ + @Generated + public ContentAnalyzer getResult() { + return this.result; + } + + /** + * Get the usage property: Usage details of the analyzer creation operation. + * + * @return the usage value. + */ + @Generated + public UsageDetails getUsage() { + return this.usage; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("id", this.id); + jsonWriter.writeStringField("status", this.status == null ? null : this.status.toString()); + jsonWriter.writeJsonField("error", this.error); + jsonWriter.writeJsonField("result", this.result); + jsonWriter.writeJsonField("usage", this.usage); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentAnalyzerOperationStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentAnalyzerOperationStatus if the JsonReader was pointing to an instance of it, or + * null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the ContentAnalyzerOperationStatus. 
+ */ + @Generated + public static ContentAnalyzerOperationStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String id = null; + OperationState status = null; + ResponseError error = null; + ContentAnalyzer result = null; + UsageDetails usage = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("id".equals(fieldName)) { + id = reader.getString(); + } else if ("status".equals(fieldName)) { + status = OperationState.fromString(reader.getString()); + } else if ("error".equals(fieldName)) { + error = ResponseError.fromJson(reader); + } else if ("result".equals(fieldName)) { + result = ContentAnalyzer.fromJson(reader); + } else if ("usage".equals(fieldName)) { + usage = UsageDetails.fromJson(reader); + } else { + reader.skipChildren(); + } + } + ContentAnalyzerOperationStatus deserializedContentAnalyzerOperationStatus + = new ContentAnalyzerOperationStatus(id, status); + deserializedContentAnalyzerOperationStatus.error = error; + deserializedContentAnalyzerOperationStatus.result = result; + deserializedContentAnalyzerOperationStatus.usage = usage; + + return deserializedContentAnalyzerOperationStatus; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerStatus.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerStatus.java new file mode 100644 index 000000000000..117685d41593 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerStatus.java @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Status of a resource. + */ +public final class ContentAnalyzerStatus extends ExpandableStringEnum<ContentAnalyzerStatus> { + /** + * The resource is being created. + */ + @Generated + public static final ContentAnalyzerStatus CREATING = fromString("creating"); + + /** + * The resource is ready. + */ + @Generated + public static final ContentAnalyzerStatus READY = fromString("ready"); + + /** + * The resource is being deleted. + */ + @Generated + public static final ContentAnalyzerStatus DELETING = fromString("deleting"); + + /** + * The resource failed during creation. + */ + @Generated + public static final ContentAnalyzerStatus FAILED = fromString("failed"); + + /** + * Creates a new instance of ContentAnalyzerStatus value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public ContentAnalyzerStatus() { + } + + /** + * Creates or finds a ContentAnalyzerStatus from its string representation. + * + * @param name a name to look for. + * @return the corresponding ContentAnalyzerStatus. + */ + @Generated + public static ContentAnalyzerStatus fromString(String name) { + return fromString(name, ContentAnalyzerStatus.class); + } + + /** + * Gets known ContentAnalyzerStatus values. + * + * @return known ContentAnalyzerStatus values. 
+ */ + @Generated + public static Collection<ContentAnalyzerStatus> values() { + return values(ContentAnalyzerStatus.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentCategoryDefinition.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentCategoryDefinition.java new file mode 100644 index 000000000000..e478759b9c27 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentCategoryDefinition.java @@ -0,0 +1,224 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.core.annotation.Fluent; +import com.azure.core.annotation.Generated; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +/** + * Content category definition. + */ +@Fluent +public final class ContentCategoryDefinition implements JsonSerializable<ContentCategoryDefinition> { + /* + * The description of the category. + */ + @Generated + private String description; + + /* + * Optional analyzer used to process the content. + */ + @Generated + private String analyzerId; + + /* + * Optional inline definition of analyzer used to process the content. + */ + @Generated + private ContentAnalyzer analyzer; + + /** + * Stores updated model property, the value is property name, not serialized name. 
+ */ + @Generated + private final Set<String> updatedProperties = new HashSet<>(); + + @Generated + private boolean jsonMergePatch; + + @Generated + private void serializeAsJsonMergePatch(boolean jsonMergePatch) { + this.jsonMergePatch = jsonMergePatch; + } + + static { + JsonMergePatchHelper + .setContentCategoryDefinitionAccessor(new JsonMergePatchHelper.ContentCategoryDefinitionAccessor() { + @Override + public ContentCategoryDefinition prepareModelForJsonMergePatch(ContentCategoryDefinition model, + boolean jsonMergePatchEnabled) { + model.serializeAsJsonMergePatch(jsonMergePatchEnabled); + return model; + } + + @Override + public boolean isJsonMergePatch(ContentCategoryDefinition model) { + return model.jsonMergePatch; + } + }); + } + + /** + * Creates an instance of ContentCategoryDefinition class. + */ + @Generated + public ContentCategoryDefinition() { + } + + /** + * Get the description property: The description of the category. + * + * @return the description value. + */ + @Generated + public String getDescription() { + return this.description; + } + + /** + * Set the description property: The description of the category. + * + * @param description the description value to set. + * @return the ContentCategoryDefinition object itself. + */ + @Generated + public ContentCategoryDefinition setDescription(String description) { + this.description = description; + this.updatedProperties.add("description"); + return this; + } + + /** + * Get the analyzerId property: Optional analyzer used to process the content. + * + * @return the analyzerId value. + */ + @Generated + public String getAnalyzerId() { + return this.analyzerId; + } + + /** + * Set the analyzerId property: Optional analyzer used to process the content. + * + * @param analyzerId the analyzerId value to set. 
+ */ + @Generated + public ContentCategoryDefinition setAnalyzerId(String analyzerId) { + this.analyzerId = analyzerId; + this.updatedProperties.add("analyzerId"); + return this; + } + + /** + * Get the analyzer property: Optional inline definition of analyzer used to process the content. + * + * @return the analyzer value. + */ + @Generated + public ContentAnalyzer getAnalyzer() { + return this.analyzer; + } + + /** + * Set the analyzer property: Optional inline definition of analyzer used to process the content. + * + * @param analyzer the analyzer value to set. + * @return the ContentCategoryDefinition object itself. + */ + @Generated + public ContentCategoryDefinition setAnalyzer(ContentAnalyzer analyzer) { + this.analyzer = analyzer; + this.updatedProperties.add("analyzer"); + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + if (jsonMergePatch) { + return toJsonMergePatch(jsonWriter); + } else { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("description", this.description); + jsonWriter.writeStringField("analyzerId", this.analyzerId); + jsonWriter.writeJsonField("analyzer", this.analyzer); + return jsonWriter.writeEndObject(); + } + } + + @Generated + private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + if (updatedProperties.contains("description")) { + if (this.description == null) { + jsonWriter.writeNullField("description"); + } else { + jsonWriter.writeStringField("description", this.description); + } + } + if (updatedProperties.contains("analyzerId")) { + if (this.analyzerId == null) { + jsonWriter.writeNullField("analyzerId"); + } else { + jsonWriter.writeStringField("analyzerId", this.analyzerId); + } + } + if (updatedProperties.contains("analyzer")) { + if (this.analyzer == null) { + jsonWriter.writeNullField("analyzer"); + } else { + 
JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(this.analyzer, true); + jsonWriter.writeJsonField("analyzer", this.analyzer); + JsonMergePatchHelper.getContentAnalyzerAccessor().prepareModelForJsonMergePatch(this.analyzer, false); + } + } + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentCategoryDefinition from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentCategoryDefinition if the JsonReader was pointing to an instance of it, or null if + * it was pointing to JSON null. + * @throws IOException If an error occurs while reading the ContentCategoryDefinition. + */ + @Generated + public static ContentCategoryDefinition fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ContentCategoryDefinition deserializedContentCategoryDefinition = new ContentCategoryDefinition(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("description".equals(fieldName)) { + deserializedContentCategoryDefinition.description = reader.getString(); + } else if ("analyzerId".equals(fieldName)) { + deserializedContentCategoryDefinition.analyzerId = reader.getString(); + } else if ("analyzer".equals(fieldName)) { + deserializedContentCategoryDefinition.analyzer = ContentAnalyzer.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedContentCategoryDefinition; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentField.java new file mode 100644 index 000000000000..4b9517b93f50 --- /dev/null +++ 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.
package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Field extracted from the content.
 *
 * <p>This is the polymorphic base of the field-value hierarchy; the JSON
 * "type" property acts as the discriminator selecting the concrete subtype
 * (StringField, NumberField, ArrayField, ...) during deserialization.</p>
 */
@Immutable
public class ContentField implements JsonSerializable<ContentField> {

    /*
     * Semantic data type of the field value.
     * Placeholder discriminator for the base type; overwritten by subtypes and by fromJson.
     */
    @Generated
    private ContentFieldType type = ContentFieldType.fromString("ContentField");

    /*
     * Span(s) associated with the field value in the markdown content.
     */
    @Generated
    private List<ContentSpan> spans;

    /*
     * Confidence of predicting the field value.
     */
    @Generated
    private Double confidence;

    /*
     * Encoded source that identifies the position of the field value in the content.
     */
    @Generated
    private String source;

    /**
     * Creates an instance of ContentField class.
     */
    @Generated
    protected ContentField() {
    }

    /**
     * Get the type property: Semantic data type of the field value.
     *
     * @return the type value.
     */
    @Generated
    public ContentFieldType getType() {
        return this.type;
    }

    /**
     * Get the spans property: Span(s) associated with the field value in the markdown content.
     *
     * @return the spans value.
     */
    @Generated
    public List<ContentSpan> getSpans() {
        return this.spans;
    }

    /**
     * Set the spans property: Span(s) associated with the field value in the markdown content.
     * Package-private: the model is immutable from the caller's perspective.
     *
     * @param spans the spans value to set.
     * @return the ContentField object itself.
     */
    @Generated
    ContentField setSpans(List<ContentSpan> spans) {
        this.spans = spans;
        return this;
    }

    /**
     * Get the confidence property: Confidence of predicting the field value.
     *
     * @return the confidence value.
     */
    @Generated
    public Double getConfidence() {
        return this.confidence;
    }

    /**
     * Set the confidence property: Confidence of predicting the field value.
     * Package-private: the model is immutable from the caller's perspective.
     *
     * @param confidence the confidence value to set.
     * @return the ContentField object itself.
     */
    @Generated
    ContentField setConfidence(Double confidence) {
        this.confidence = confidence;
        return this;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the field value in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Set the source property: Encoded source that identifies the position of the field value in the content.
     * Package-private: the model is immutable from the caller's perspective.
     *
     * @param source the source value to set.
     * @return the ContentField object itself.
     */
    @Generated
    ContentField setSource(String source) {
        this.source = source;
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString());
        jsonWriter.writeArrayField("spans", this.spans, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeNumberField("confidence", this.confidence);
        jsonWriter.writeStringField("source", this.source);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of ContentField from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of ContentField if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IOException If an error occurs while reading the ContentField.
     */
    @Generated
    public static ContentField fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String discriminatorValue = null;
            // Buffer the object so we can peek at the "type" discriminator and then
            // reset() to re-read the whole object with the chosen subtype's fromJson.
            try (JsonReader readerToUse = reader.bufferObject()) {
                // Prepare for reading
                readerToUse.nextToken();
                while (readerToUse.nextToken() != JsonToken.END_OBJECT) {
                    String fieldName = readerToUse.getFieldName();
                    readerToUse.nextToken();
                    if ("type".equals(fieldName)) {
                        discriminatorValue = readerToUse.getString();
                        break;
                    } else {
                        readerToUse.skipChildren();
                    }
                }
                // Use the discriminator value to determine which subtype should be deserialized.
                if ("string".equals(discriminatorValue)) {
                    return StringField.fromJson(readerToUse.reset());
                } else if ("date".equals(discriminatorValue)) {
                    return DateField.fromJson(readerToUse.reset());
                } else if ("time".equals(discriminatorValue)) {
                    return TimeField.fromJson(readerToUse.reset());
                } else if ("number".equals(discriminatorValue)) {
                    return NumberField.fromJson(readerToUse.reset());
                } else if ("integer".equals(discriminatorValue)) {
                    return IntegerField.fromJson(readerToUse.reset());
                } else if ("boolean".equals(discriminatorValue)) {
                    return BooleanField.fromJson(readerToUse.reset());
                } else if ("array".equals(discriminatorValue)) {
                    return ArrayField.fromJson(readerToUse.reset());
                } else if ("object".equals(discriminatorValue)) {
                    return ObjectField.fromJson(readerToUse.reset());
                } else if ("json".equals(discriminatorValue)) {
                    return JsonField.fromJson(readerToUse.reset());
                } else {
                    // Unknown discriminator: fall back to the base-type shape.
                    return fromJsonKnownDiscriminator(readerToUse.reset());
                }
            }
        });
    }

    // Deserializes the base-type properties when the discriminator did not match any
    // known subtype (forward compatibility with discriminators added by the service).
    @Generated
    static ContentField fromJsonKnownDiscriminator(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            ContentField deserializedContentField = new ContentField();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();
                if ("type".equals(fieldName)) {
                    deserializedContentField.type = ContentFieldType.fromString(reader.getString());
                } else if ("spans".equals(fieldName)) {
                    List<ContentSpan> spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1));
                    deserializedContentField.spans = spans;
                } else if ("confidence".equals(fieldName)) {
                    deserializedContentField.confidence = reader.getNullable(JsonReader::getDouble);
                } else if ("source".equals(fieldName)) {
                    deserializedContentField.source = reader.getString();
                } else {
                    reader.skipChildren();
                }
            }
            return deserializedContentField;
        });
    }

    /**
     * Gets the value of the field, regardless of its type.
     * Returns the appropriate typed value for each field type:
     * - StringField: returns String (from getValueString())
     * - NumberField: returns Double (from getValueNumber())
     * - IntegerField: returns Long (from getValueInteger())
     * - DateField: returns LocalDate (from getValueDate())
     * - TimeField: returns String (from getValueTime())
     * - BooleanField: returns Boolean (from isValueBoolean())
     * - ObjectField: returns Map (from getValueObject())
     * - ArrayField: returns List (from getValueArray())
     * - JsonField: returns String (from getValueJson())
     *
     * @return the field value, or null if not available.
     */
    public Object getValue() {
        // Hand-written convenience dispatch over the known subtypes; returns null for
        // the base type (unknown discriminator) rather than throwing.
        if (this instanceof StringField) {
            return ((StringField) this).getValueString();
        }
        if (this instanceof NumberField) {
            return ((NumberField) this).getValueNumber();
        }
        if (this instanceof IntegerField) {
            return ((IntegerField) this).getValueInteger();
        }
        if (this instanceof DateField) {
            return ((DateField) this).getValueDate();
        }
        if (this instanceof TimeField) {
            return ((TimeField) this).getValueTime();
        }
        if (this instanceof BooleanField) {
            return ((BooleanField) this).isValueBoolean();
        }
        if (this instanceof ObjectField) {
            return ((ObjectField) this).getValueObject();
        }
        if (this instanceof ArrayField) {
            return ((ArrayField) this).getValueArray();
        }
        if (this instanceof JsonField) {
            return ((JsonField) this).getValueJson();
        }
        return null;
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper;
import com.azure.core.annotation.Fluent;
import com.azure.core.annotation.Generated;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Definition of the field using a JSON Schema like syntax.
 *
 * <p>Note: several Java property names differ from their serialized JSON names to
 * avoid reserved words: {@code itemDefinition} serializes as "items",
 * {@code enumProperty} as "enum", and {@code ref} as "$ref".</p>
 *
 * <p>Supports JSON merge-patch serialization: setters record updated properties so
 * {@link #toJson(JsonWriter)} can emit only the changed fields when merge-patch mode
 * is enabled via {@link JsonMergePatchHelper}.</p>
 */
@Fluent
public final class ContentFieldDefinition implements JsonSerializable<ContentFieldDefinition> {
    /*
     * Generation method.
     */
    @Generated
    private GenerationMethod method;

    /*
     * Semantic data type of the field value.
     */
    @Generated
    private ContentFieldType type;

    /*
     * Field description.
     */
    @Generated
    private String description;

    /*
     * Field type schema of each array element, if type is array. Serialized as "items".
     */
    @Generated
    private ContentFieldDefinition itemDefinition;

    /*
     * Named sub-fields, if type is object.
     */
    @Generated
    private Map<String, ContentFieldDefinition> properties;

    /*
     * Examples of field values.
     */
    @Generated
    private List<String> examples;

    /*
     * Enumeration of possible field values. Serialized as "enum".
     */
    @Generated
    private List<String> enumProperty;

    /*
     * Descriptions for each enumeration value.
     */
    @Generated
    private Map<String, String> enumDescriptions;

    /*
     * Reference to another field definition. Serialized as "$ref".
     */
    @Generated
    private String ref;

    /*
     * Return grounding source and confidence.
     */
    @Generated
    private Boolean estimateSourceAndConfidence;

    /**
     * Stores updated model property, the value is property name, not serialized name.
     */
    @Generated
    private final Set<String> updatedProperties = new HashSet<>();

    // When true, toJson emits merge-patch output (only updated properties).
    @Generated
    private boolean jsonMergePatch;

    @Generated
    private void serializeAsJsonMergePatch(boolean jsonMergePatch) {
        this.jsonMergePatch = jsonMergePatch;
    }

    // Registers a package-internal accessor so the implementation layer can toggle
    // merge-patch serialization without widening this model's public API.
    static {
        JsonMergePatchHelper
            .setContentFieldDefinitionAccessor(new JsonMergePatchHelper.ContentFieldDefinitionAccessor() {
                @Override
                public ContentFieldDefinition prepareModelForJsonMergePatch(ContentFieldDefinition model,
                    boolean jsonMergePatchEnabled) {
                    model.serializeAsJsonMergePatch(jsonMergePatchEnabled);
                    return model;
                }

                @Override
                public boolean isJsonMergePatch(ContentFieldDefinition model) {
                    return model.jsonMergePatch;
                }
            });
    }

    /**
     * Creates an instance of ContentFieldDefinition class.
     */
    @Generated
    public ContentFieldDefinition() {
    }

    /**
     * Get the method property: Generation method.
     *
     * @return the method value.
     */
    @Generated
    public GenerationMethod getMethod() {
        return this.method;
    }

    /**
     * Set the method property: Generation method.
     *
     * @param method the method value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setMethod(GenerationMethod method) {
        this.method = method;
        this.updatedProperties.add("method");
        return this;
    }

    /**
     * Get the type property: Semantic data type of the field value.
     *
     * @return the type value.
     */
    @Generated
    public ContentFieldType getType() {
        return this.type;
    }

    /**
     * Set the type property: Semantic data type of the field value.
     *
     * @param type the type value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setType(ContentFieldType type) {
        this.type = type;
        this.updatedProperties.add("type");
        return this;
    }

    /**
     * Get the description property: Field description.
     *
     * @return the description value.
     */
    @Generated
    public String getDescription() {
        return this.description;
    }

    /**
     * Set the description property: Field description.
     *
     * @param description the description value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setDescription(String description) {
        this.description = description;
        this.updatedProperties.add("description");
        return this;
    }

    /**
     * Get the itemDefinition property: Field type schema of each array element, if type is array.
     *
     * @return the itemDefinition value.
     */
    @Generated
    public ContentFieldDefinition getItemDefinition() {
        return this.itemDefinition;
    }

    /**
     * Set the itemDefinition property: Field type schema of each array element, if type is array.
     *
     * @param itemDefinition the itemDefinition value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setItemDefinition(ContentFieldDefinition itemDefinition) {
        this.itemDefinition = itemDefinition;
        this.updatedProperties.add("itemDefinition");
        return this;
    }

    /**
     * Get the properties property: Named sub-fields, if type is object.
     *
     * @return the properties value.
     */
    @Generated
    public Map<String, ContentFieldDefinition> getProperties() {
        return this.properties;
    }

    /**
     * Set the properties property: Named sub-fields, if type is object.
     *
     * @param properties the properties value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setProperties(Map<String, ContentFieldDefinition> properties) {
        this.properties = properties;
        this.updatedProperties.add("properties");
        return this;
    }

    /**
     * Get the examples property: Examples of field values.
     *
     * @return the examples value.
     */
    @Generated
    public List<String> getExamples() {
        return this.examples;
    }

    /**
     * Set the examples property: Examples of field values.
     *
     * @param examples the examples value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setExamples(List<String> examples) {
        this.examples = examples;
        this.updatedProperties.add("examples");
        return this;
    }

    /**
     * Get the enumProperty property: Enumeration of possible field values.
     *
     * @return the enumProperty value.
     */
    @Generated
    public List<String> getEnumProperty() {
        return this.enumProperty;
    }

    /**
     * Set the enumProperty property: Enumeration of possible field values.
     *
     * @param enumProperty the enumProperty value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setEnumProperty(List<String> enumProperty) {
        this.enumProperty = enumProperty;
        this.updatedProperties.add("enumProperty");
        return this;
    }

    /**
     * Get the enumDescriptions property: Descriptions for each enumeration value.
     *
     * @return the enumDescriptions value.
     */
    @Generated
    public Map<String, String> getEnumDescriptions() {
        return this.enumDescriptions;
    }

    /**
     * Set the enumDescriptions property: Descriptions for each enumeration value.
     *
     * @param enumDescriptions the enumDescriptions value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setEnumDescriptions(Map<String, String> enumDescriptions) {
        this.enumDescriptions = enumDescriptions;
        this.updatedProperties.add("enumDescriptions");
        return this;
    }

    /**
     * Get the ref property: Reference to another field definition.
     *
     * @return the ref value.
     */
    @Generated
    public String getRef() {
        return this.ref;
    }

    /**
     * Set the ref property: Reference to another field definition.
     *
     * @param ref the ref value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setRef(String ref) {
        this.ref = ref;
        this.updatedProperties.add("ref");
        return this;
    }

    /**
     * Get the estimateSourceAndConfidence property: Return grounding source and confidence.
     *
     * @return the estimateSourceAndConfidence value.
     */
    @Generated
    public Boolean isEstimateSourceAndConfidence() {
        return this.estimateSourceAndConfidence;
    }

    /**
     * Set the estimateSourceAndConfidence property: Return grounding source and confidence.
     *
     * @param estimateSourceAndConfidence the estimateSourceAndConfidence value to set.
     * @return the ContentFieldDefinition object itself.
     */
    @Generated
    public ContentFieldDefinition setEstimateSourceAndConfidence(Boolean estimateSourceAndConfidence) {
        this.estimateSourceAndConfidence = estimateSourceAndConfidence;
        this.updatedProperties.add("estimateSourceAndConfidence");
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        if (jsonMergePatch) {
            return toJsonMergePatch(jsonWriter);
        } else {
            jsonWriter.writeStartObject();
            jsonWriter.writeStringField("method", this.method == null ? null : this.method.toString());
            jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString());
            jsonWriter.writeStringField("description", this.description);
            jsonWriter.writeJsonField("items", this.itemDefinition);
            jsonWriter.writeMapField("properties", this.properties, (writer, element) -> writer.writeJson(element));
            jsonWriter.writeArrayField("examples", this.examples, (writer, element) -> writer.writeString(element));
            jsonWriter.writeArrayField("enum", this.enumProperty, (writer, element) -> writer.writeString(element));
            jsonWriter.writeMapField("enumDescriptions", this.enumDescriptions,
                (writer, element) -> writer.writeString(element));
            jsonWriter.writeStringField("$ref", this.ref);
            jsonWriter.writeBooleanField("estimateSourceAndConfidence", this.estimateSourceAndConfidence);
            return jsonWriter.writeEndObject();
        }
    }

    // Merge-patch serialization: only properties recorded in updatedProperties are
    // written; explicit nulls clear the field on the service (RFC 7396 semantics).
    // Nested ContentFieldDefinition values are temporarily switched into merge-patch
    // mode for the write and restored afterwards.
    @Generated
    private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        if (updatedProperties.contains("method")) {
            if (this.method == null) {
                jsonWriter.writeNullField("method");
            } else {
                jsonWriter.writeStringField("method", this.method.toString());
            }
        }
        if (updatedProperties.contains("type")) {
            if (this.type == null) {
                jsonWriter.writeNullField("type");
            } else {
                jsonWriter.writeStringField("type", this.type.toString());
            }
        }
        if (updatedProperties.contains("description")) {
            if (this.description == null) {
                jsonWriter.writeNullField("description");
            } else {
                jsonWriter.writeStringField("description", this.description);
            }
        }
        if (updatedProperties.contains("itemDefinition")) {
            if (this.itemDefinition == null) {
                jsonWriter.writeNullField("items");
            } else {
                JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                    .prepareModelForJsonMergePatch(this.itemDefinition, true);
                jsonWriter.writeJsonField("items", this.itemDefinition);
                JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                    .prepareModelForJsonMergePatch(this.itemDefinition, false);
            }
        }
        if (updatedProperties.contains("properties")) {
            if (this.properties == null) {
                jsonWriter.writeNullField("properties");
            } else {
                jsonWriter.writeMapField("properties", this.properties, (writer, element) -> {
                    if (element != null) {
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, true);
                        writer.writeJson(element);
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, false);
                    } else {
                        writer.writeNull();
                    }
                });
            }
        }
        if (updatedProperties.contains("examples")) {
            if (this.examples == null) {
                jsonWriter.writeNullField("examples");
            } else {
                jsonWriter.writeArrayField("examples", this.examples, (writer, element) -> writer.writeString(element));
            }
        }
        if (updatedProperties.contains("enumProperty")) {
            if (this.enumProperty == null) {
                jsonWriter.writeNullField("enum");
            } else {
                jsonWriter.writeArrayField("enum", this.enumProperty, (writer, element) -> writer.writeString(element));
            }
        }
        if (updatedProperties.contains("enumDescriptions")) {
            if (this.enumDescriptions == null) {
                jsonWriter.writeNullField("enumDescriptions");
            } else {
                jsonWriter.writeMapField("enumDescriptions", this.enumDescriptions, (writer, element) -> {
                    if (element != null) {
                        writer.writeString(element);
                    } else {
                        writer.writeNull();
                    }
                });
            }
        }
        if (updatedProperties.contains("ref")) {
            if (this.ref == null) {
                jsonWriter.writeNullField("$ref");
            } else {
                jsonWriter.writeStringField("$ref", this.ref);
            }
        }
        if (updatedProperties.contains("estimateSourceAndConfidence")) {
            if (this.estimateSourceAndConfidence == null) {
                jsonWriter.writeNullField("estimateSourceAndConfidence");
            } else {
                jsonWriter.writeBooleanField("estimateSourceAndConfidence", this.estimateSourceAndConfidence);
            }
        }
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of ContentFieldDefinition from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of ContentFieldDefinition if the JsonReader was pointing to an instance of it, or null if it
     * was pointing to JSON null.
     * @throws IOException If an error occurs while reading the ContentFieldDefinition.
     */
    @Generated
    public static ContentFieldDefinition fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            ContentFieldDefinition deserializedContentFieldDefinition = new ContentFieldDefinition();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("method".equals(fieldName)) {
                    deserializedContentFieldDefinition.method = GenerationMethod.fromString(reader.getString());
                } else if ("type".equals(fieldName)) {
                    deserializedContentFieldDefinition.type = ContentFieldType.fromString(reader.getString());
                } else if ("description".equals(fieldName)) {
                    deserializedContentFieldDefinition.description = reader.getString();
                } else if ("items".equals(fieldName)) {
                    // Recursive: array element schema is itself a field definition.
                    deserializedContentFieldDefinition.itemDefinition = ContentFieldDefinition.fromJson(reader);
                } else if ("properties".equals(fieldName)) {
                    Map<String, ContentFieldDefinition> properties
                        = reader.readMap(reader1 -> ContentFieldDefinition.fromJson(reader1));
                    deserializedContentFieldDefinition.properties = properties;
                } else if ("examples".equals(fieldName)) {
                    List<String> examples = reader.readArray(reader1 -> reader1.getString());
                    deserializedContentFieldDefinition.examples = examples;
                } else if ("enum".equals(fieldName)) {
                    List<String> enumProperty = reader.readArray(reader1 -> reader1.getString());
                    deserializedContentFieldDefinition.enumProperty = enumProperty;
                } else if ("enumDescriptions".equals(fieldName)) {
                    Map<String, String> enumDescriptions = reader.readMap(reader1 -> reader1.getString());
                    deserializedContentFieldDefinition.enumDescriptions = enumDescriptions;
                } else if ("$ref".equals(fieldName)) {
                    deserializedContentFieldDefinition.ref = reader.getString();
                } else if ("estimateSourceAndConfidence".equals(fieldName)) {
                    deserializedContentFieldDefinition.estimateSourceAndConfidence
                        = reader.getNullable(JsonReader::getBoolean);
                } else {
                    // Unknown properties are skipped for forward compatibility.
                    reader.skipChildren();
                }
            }

            return deserializedContentFieldDefinition;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper;
import com.azure.core.annotation.Fluent;
import com.azure.core.annotation.Generated;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Schema of fields to be extracted from documents.
 *
 * <p>Supports JSON merge-patch serialization: setters record updated properties so
 * {@link #toJson(JsonWriter)} can emit only the changed fields when merge-patch mode
 * is enabled via {@link JsonMergePatchHelper}.</p>
 */
@Fluent
public final class ContentFieldSchema implements JsonSerializable<ContentFieldSchema> {
    /*
     * The name of the field schema.
     */
    @Generated
    private String name;

    /*
     * A description of the field schema.
     */
    @Generated
    private String description;

    /*
     * The fields defined in the schema.
     */
    @Generated
    private Map<String, ContentFieldDefinition> fields;

    /*
     * Additional definitions referenced by the fields in the schema.
     */
    @Generated
    private Map<String, ContentFieldDefinition> definitions;

    /**
     * Stores updated model property, the value is property name, not serialized name.
     */
    @Generated
    private final Set<String> updatedProperties = new HashSet<>();

    // When true, toJson emits merge-patch output (only updated properties).
    @Generated
    private boolean jsonMergePatch;

    @Generated
    private void serializeAsJsonMergePatch(boolean jsonMergePatch) {
        this.jsonMergePatch = jsonMergePatch;
    }

    // Registers a package-internal accessor so the implementation layer can toggle
    // merge-patch serialization without widening this model's public API.
    static {
        JsonMergePatchHelper.setContentFieldSchemaAccessor(new JsonMergePatchHelper.ContentFieldSchemaAccessor() {
            @Override
            public ContentFieldSchema prepareModelForJsonMergePatch(ContentFieldSchema model,
                boolean jsonMergePatchEnabled) {
                model.serializeAsJsonMergePatch(jsonMergePatchEnabled);
                return model;
            }

            @Override
            public boolean isJsonMergePatch(ContentFieldSchema model) {
                return model.jsonMergePatch;
            }
        });
    }

    /**
     * Creates an instance of ContentFieldSchema class.
     */
    @Generated
    public ContentFieldSchema() {
    }

    /**
     * Get the name property: The name of the field schema.
     *
     * @return the name value.
     */
    @Generated
    public String getName() {
        return this.name;
    }

    /**
     * Set the name property: The name of the field schema.
     *
     * @param name the name value to set.
     * @return the ContentFieldSchema object itself.
     */
    @Generated
    public ContentFieldSchema setName(String name) {
        this.name = name;
        this.updatedProperties.add("name");
        return this;
    }

    /**
     * Get the description property: A description of the field schema.
     *
     * @return the description value.
     */
    @Generated
    public String getDescription() {
        return this.description;
    }

    /**
     * Set the description property: A description of the field schema.
     *
     * @param description the description value to set.
     * @return the ContentFieldSchema object itself.
     */
    @Generated
    public ContentFieldSchema setDescription(String description) {
        this.description = description;
        this.updatedProperties.add("description");
        return this;
    }

    /**
     * Get the fields property: The fields defined in the schema.
     *
     * @return the fields value.
     */
    @Generated
    public Map<String, ContentFieldDefinition> getFields() {
        return this.fields;
    }

    /**
     * Set the fields property: The fields defined in the schema.
     * <p>Required when creating the resource.</p>
     *
     * @param fields the fields value to set.
     * @return the ContentFieldSchema object itself.
     */
    @Generated
    public ContentFieldSchema setFields(Map<String, ContentFieldDefinition> fields) {
        this.fields = fields;
        this.updatedProperties.add("fields");
        return this;
    }

    /**
     * Get the definitions property: Additional definitions referenced by the fields in the schema.
     *
     * @return the definitions value.
     */
    @Generated
    public Map<String, ContentFieldDefinition> getDefinitions() {
        return this.definitions;
    }

    /**
     * Set the definitions property: Additional definitions referenced by the fields in the schema.
     *
     * @param definitions the definitions value to set.
     * @return the ContentFieldSchema object itself.
     */
    @Generated
    public ContentFieldSchema setDefinitions(Map<String, ContentFieldDefinition> definitions) {
        this.definitions = definitions;
        this.updatedProperties.add("definitions");
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        if (jsonMergePatch) {
            return toJsonMergePatch(jsonWriter);
        } else {
            jsonWriter.writeStartObject();
            jsonWriter.writeStringField("name", this.name);
            jsonWriter.writeStringField("description", this.description);
            jsonWriter.writeMapField("fields", this.fields, (writer, element) -> writer.writeJson(element));
            jsonWriter.writeMapField("definitions", this.definitions, (writer, element) -> writer.writeJson(element));
            return jsonWriter.writeEndObject();
        }
    }

    // Merge-patch serialization: only properties recorded in updatedProperties are
    // written; explicit nulls clear the field on the service (RFC 7396 semantics).
    // Nested ContentFieldDefinition values are temporarily switched into merge-patch
    // mode for the write and restored afterwards.
    @Generated
    private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        if (updatedProperties.contains("name")) {
            if (this.name == null) {
                jsonWriter.writeNullField("name");
            } else {
                jsonWriter.writeStringField("name", this.name);
            }
        }
        if (updatedProperties.contains("description")) {
            if (this.description == null) {
                jsonWriter.writeNullField("description");
            } else {
                jsonWriter.writeStringField("description", this.description);
            }
        }
        if (updatedProperties.contains("fields")) {
            if (this.fields == null) {
                jsonWriter.writeNullField("fields");
            } else {
                jsonWriter.writeMapField("fields", this.fields, (writer, element) -> {
                    if (element != null) {
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, true);
                        writer.writeJson(element);
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, false);
                    } else {
                        writer.writeNull();
                    }
                });
            }
        }
        if (updatedProperties.contains("definitions")) {
            if (this.definitions == null) {
                jsonWriter.writeNullField("definitions");
            } else {
                jsonWriter.writeMapField("definitions", this.definitions, (writer, element) -> {
                    if (element != null) {
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, true);
                        writer.writeJson(element);
                        JsonMergePatchHelper.getContentFieldDefinitionAccessor()
                            .prepareModelForJsonMergePatch(element, false);
                    } else {
                        writer.writeNull();
                    }
                });
            }
        }
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of ContentFieldSchema from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of ContentFieldSchema if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IOException If an error occurs while reading the ContentFieldSchema.
     */
    @Generated
    public static ContentFieldSchema fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            ContentFieldSchema deserializedContentFieldSchema = new ContentFieldSchema();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("name".equals(fieldName)) {
                    deserializedContentFieldSchema.name = reader.getString();
                } else if ("description".equals(fieldName)) {
                    deserializedContentFieldSchema.description = reader.getString();
                } else if ("fields".equals(fieldName)) {
                    Map<String, ContentFieldDefinition> fields
                        = reader.readMap(reader1 -> ContentFieldDefinition.fromJson(reader1));
                    deserializedContentFieldSchema.fields = fields;
                } else if ("definitions".equals(fieldName)) {
                    Map<String, ContentFieldDefinition> definitions
                        = reader.readMap(reader1 -> ContentFieldDefinition.fromJson(reader1));
                    deserializedContentFieldSchema.definitions = definitions;
                } else {
                    // Unknown properties are skipped for forward compatibility.
                    reader.skipChildren();
                }
            }

            return deserializedContentFieldSchema;
        });
    }
}
+ */ +public final class ContentFieldType extends ExpandableStringEnum { + /** + * Plain text. + */ + @Generated + public static final ContentFieldType STRING = fromString("string"); + + /** + * Date, normalized to ISO 8601 (YYYY-MM-DD) format. + */ + @Generated + public static final ContentFieldType DATE = fromString("date"); + + /** + * Time, normalized to ISO 8601 (hh:mm:ss) format. + */ + @Generated + public static final ContentFieldType TIME = fromString("time"); + + /** + * Number as double precision floating point. + */ + @Generated + public static final ContentFieldType NUMBER = fromString("number"); + + /** + * Integer as 64-bit signed integer. + */ + @Generated + public static final ContentFieldType INTEGER = fromString("integer"); + + /** + * Boolean value. + */ + @Generated + public static final ContentFieldType BOOLEAN = fromString("boolean"); + + /** + * List of subfields of the same type. + */ + @Generated + public static final ContentFieldType ARRAY = fromString("array"); + + /** + * Named list of subfields. + */ + @Generated + public static final ContentFieldType OBJECT = fromString("object"); + + /** + * JSON object. + */ + @Generated + public static final ContentFieldType JSON = fromString("json"); + + /** + * Creates a new instance of ContentFieldType value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public ContentFieldType() { + } + + /** + * Creates or finds a ContentFieldType from its string representation. + * + * @param name a name to look for. + * @return the corresponding ContentFieldType. + */ + @Generated + public static ContentFieldType fromString(String name) { + return fromString(name, ContentFieldType.class); + } + + /** + * Gets known ContentFieldType values. + * + * @return known ContentFieldType values. 
+ */ + @Generated + public static Collection values() { + return values(ContentFieldType.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentSpan.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentSpan.java new file mode 100644 index 000000000000..15c260f6b8bc --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentSpan.java @@ -0,0 +1,105 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Position of the element in markdown, specified as a character offset and length. + */ +@Immutable +public final class ContentSpan implements JsonSerializable { + /* + * Starting position (0-indexed) of the element in markdown, specified in characters. + */ + @Generated + private final int offset; + + /* + * Length of the element in markdown, specified in characters. + */ + @Generated + private final int length; + + /** + * Creates an instance of ContentSpan class. + * + * @param offset the offset value to set. + * @param length the length value to set. + */ + @Generated + private ContentSpan(int offset, int length) { + this.offset = offset; + this.length = length; + } + + /** + * Get the offset property: Starting position (0-indexed) of the element in markdown, specified in characters. + * + * @return the offset value. 
+ */ + @Generated + public int getOffset() { + return this.offset; + } + + /** + * Get the length property: Length of the element in markdown, specified in characters. + * + * @return the length value. + */ + @Generated + public int getLength() { + return this.length; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeIntField("offset", this.offset); + jsonWriter.writeIntField("length", this.length); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentSpan from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentSpan if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the ContentSpan. 
+ */ + @Generated + public static ContentSpan fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + int offset = 0; + int length = 0; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("offset".equals(fieldName)) { + offset = reader.getInt(); + } else if ("length".equals(fieldName)) { + length = reader.getInt(); + } else { + reader.skipChildren(); + } + } + return new ContentSpan(offset, length); + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentUnderstandingDefaults.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentUnderstandingDefaults.java new file mode 100644 index 000000000000..49c2fa8472ce --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ContentUnderstandingDefaults.java @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.Map; + +/** + * default settings for this Content Understanding resource. + */ +public final class ContentUnderstandingDefaults implements JsonSerializable { + + /* + * Mapping of model names to deployments. + * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + */ + @Generated + private final Map modelDeployments; + + /** + * Creates an instance of ContentUnderstandingDefaults class. 
+ * + * @param modelDeployments Mapping of model names to deployments. For example: { "gpt-4.1": "myGpt41Deployment", + * "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + */ + @Generated + public ContentUnderstandingDefaults(Map modelDeployments) { + this.modelDeployments = modelDeployments; + } + + /** + * Get the modelDeployments property: Mapping of model names to deployments. + * Ex. { "gpt-4.1": "myGpt41Deployment", "text-embedding-3-large": "myTextEmbedding3LargeDeployment" }. + * + * @return the modelDeployments value. + */ + @Generated + public Map getModelDeployments() { + return this.modelDeployments; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeMapField("modelDeployments", this.modelDeployments, + (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ContentUnderstandingDefaults from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ContentUnderstandingDefaults if the JsonReader was pointing to an instance of it, or null + * if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the ContentUnderstandingDefaults. 
+ */ + @Generated + public static ContentUnderstandingDefaults fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + Map modelDeployments = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + if ("modelDeployments".equals(fieldName)) { + modelDeployments = reader.readMap(reader1 -> reader1.getString()); + } else { + reader.skipChildren(); + } + } + return new ContentUnderstandingDefaults(modelDeployments); + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/CopyAuthorization.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/CopyAuthorization.java new file mode 100644 index 000000000000..f9b30f74d6de --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/CopyAuthorization.java @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; + +/** + * Copy authorization details for cross-resource copy. + */ +@Immutable +public final class CopyAuthorization implements JsonSerializable { + /* + * Full path of the source analyzer. + */ + @Generated + private final String source; + + /* + * Azure resource ID of the target location to copy to. 
+ */ + @Generated + private final String targetAzureResourceId; + + /* + * Date/time when the copy authorization expires. + */ + @Generated + private final OffsetDateTime expiresAt; + + /** + * Creates an instance of CopyAuthorization class. + * + * @param source the source value to set. + * @param targetAzureResourceId the targetAzureResourceId value to set. + * @param expiresAt the expiresAt value to set. + */ + @Generated + private CopyAuthorization(String source, String targetAzureResourceId, OffsetDateTime expiresAt) { + this.source = source; + this.targetAzureResourceId = targetAzureResourceId; + this.expiresAt = expiresAt; + } + + /** + * Get the source property: Full path of the source analyzer. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the targetAzureResourceId property: Azure resource ID of the target location to copy to. + * + * @return the targetAzureResourceId value. + */ + @Generated + public String getTargetAzureResourceId() { + return this.targetAzureResourceId; + } + + /** + * Get the expiresAt property: Date/time when the copy authorization expires. + * + * @return the expiresAt value. + */ + @Generated + public OffsetDateTime getExpiresAt() { + return this.expiresAt; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeStringField("targetAzureResourceId", this.targetAzureResourceId); + jsonWriter.writeStringField("expiresAt", + this.expiresAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.expiresAt)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of CopyAuthorization from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of CopyAuthorization if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the CopyAuthorization. + */ + @Generated + public static CopyAuthorization fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String source = null; + String targetAzureResourceId = null; + OffsetDateTime expiresAt = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("targetAzureResourceId".equals(fieldName)) { + targetAzureResourceId = reader.getString(); + } else if ("expiresAt".equals(fieldName)) { + expiresAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else { + reader.skipChildren(); + } + } + return new CopyAuthorization(source, targetAzureResourceId, expiresAt); + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DateField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DateField.java new file mode 100644 index 000000000000..1bd11e682574 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DateField.java @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.LocalDate; +import java.util.List; +import java.util.Objects; + +/** + * Date field extracted from the content. + */ +@Immutable +public final class DateField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.DATE; + + /* + * Date field value, in ISO 8601 (YYYY-MM-DD) format. + */ + @Generated + private LocalDate valueDate; + + /** + * Creates an instance of DateField class. + */ + @Generated + private DateField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueDate property: Date field value, in ISO 8601 (YYYY-MM-DD) format. + * + * @return the valueDate value. + */ + @Generated + public LocalDate getValueDate() { + return this.valueDate; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeStringField("valueDate", Objects.toString(this.valueDate, null)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DateField from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of DateField if the JsonReader was pointing to an instance of it, or null if it was pointing + * to JSON null. + * @throws IOException If an error occurs while reading the DateField. + */ + @Generated + public static DateField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DateField deserializedDateField = new DateField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedDateField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedDateField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedDateField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedDateField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueDate".equals(fieldName)) { + deserializedDateField.valueDate + = reader.getNullable(nonNullReader -> LocalDate.parse(nonNullReader.getString())); + } else { + reader.skipChildren(); + } + } + + return deserializedDateField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotation.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotation.java new file mode 100644 index 000000000000..3d56ac15146e --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotation.java @@ -0,0 +1,262 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; + +/** + * Annotation in a document, such as a strikethrough or a comment. + */ +@Immutable +public final class DocumentAnnotation implements JsonSerializable { + /* + * Annotation identifier. + */ + @Generated + private final String id; + + /* + * Annotation kind. + */ + @Generated + private final DocumentAnnotationKind kind; + + /* + * Spans of the content associated with the annotation. + */ + @Generated + private List spans; + + /* + * Position of the annotation. + */ + @Generated + private String source; + + /* + * Comments associated with the annotation. + */ + @Generated + private List comments; + + /* + * Annotation author. + */ + @Generated + private String author; + + /* + * Date and time when the annotation was created. + */ + @Generated + private OffsetDateTime createdAt; + + /* + * Date and time when the annotation was last modified. + */ + @Generated + private OffsetDateTime lastModifiedAt; + + /* + * Tags associated with the annotation. + */ + @Generated + private List tags; + + /** + * Creates an instance of DocumentAnnotation class. + * + * @param id the id value to set. + * @param kind the kind value to set. + */ + @Generated + private DocumentAnnotation(String id, DocumentAnnotationKind kind) { + this.id = id; + this.kind = kind; + } + + /** + * Get the id property: Annotation identifier. + * + * @return the id value. + */ + @Generated + public String getId() { + return this.id; + } + + /** + * Get the kind property: Annotation kind. + * + * @return the kind value. 
+ */ + @Generated + public DocumentAnnotationKind getKind() { + return this.kind; + } + + /** + * Get the spans property: Spans of the content associated with the annotation. + * + * @return the spans value. + */ + @Generated + public List getSpans() { + return this.spans; + } + + /** + * Get the source property: Position of the annotation. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the comments property: Comments associated with the annotation. + * + * @return the comments value. + */ + @Generated + public List getComments() { + return this.comments; + } + + /** + * Get the author property: Annotation author. + * + * @return the author value. + */ + @Generated + public String getAuthor() { + return this.author; + } + + /** + * Get the createdAt property: Date and time when the annotation was created. + * + * @return the createdAt value. + */ + @Generated + public OffsetDateTime getCreatedAt() { + return this.createdAt; + } + + /** + * Get the lastModifiedAt property: Date and time when the annotation was last modified. + * + * @return the lastModifiedAt value. + */ + @Generated + public OffsetDateTime getLastModifiedAt() { + return this.lastModifiedAt; + } + + /** + * Get the tags property: Tags associated with the annotation. + * + * @return the tags value. + */ + @Generated + public List getTags() { + return this.tags; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("id", this.id); + jsonWriter.writeStringField("kind", this.kind == null ? 
null : this.kind.toString()); + jsonWriter.writeArrayField("spans", this.spans, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeArrayField("comments", this.comments, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("author", this.author); + jsonWriter.writeStringField("createdAt", + this.createdAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.createdAt)); + jsonWriter.writeStringField("lastModifiedAt", + this.lastModifiedAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModifiedAt)); + jsonWriter.writeArrayField("tags", this.tags, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentAnnotation from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentAnnotation if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentAnnotation. 
+ */ + @Generated + public static DocumentAnnotation fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String id = null; + DocumentAnnotationKind kind = null; + List spans = null; + String source = null; + List comments = null; + String author = null; + OffsetDateTime createdAt = null; + OffsetDateTime lastModifiedAt = null; + List tags = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("id".equals(fieldName)) { + id = reader.getString(); + } else if ("kind".equals(fieldName)) { + kind = DocumentAnnotationKind.fromString(reader.getString()); + } else if ("spans".equals(fieldName)) { + spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("comments".equals(fieldName)) { + comments = reader.readArray(reader1 -> DocumentAnnotationComment.fromJson(reader1)); + } else if ("author".equals(fieldName)) { + author = reader.getString(); + } else if ("createdAt".equals(fieldName)) { + createdAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("lastModifiedAt".equals(fieldName)) { + lastModifiedAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("tags".equals(fieldName)) { + tags = reader.readArray(reader1 -> reader1.getString()); + } else { + reader.skipChildren(); + } + } + DocumentAnnotation deserializedDocumentAnnotation = new DocumentAnnotation(id, kind); + deserializedDocumentAnnotation.spans = spans; + deserializedDocumentAnnotation.source = source; + deserializedDocumentAnnotation.comments = comments; + deserializedDocumentAnnotation.author = author; + deserializedDocumentAnnotation.createdAt = createdAt; + deserializedDocumentAnnotation.lastModifiedAt = lastModifiedAt; + 
deserializedDocumentAnnotation.tags = tags; + + return deserializedDocumentAnnotation; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationComment.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationComment.java new file mode 100644 index 000000000000..0011dc3f2b71 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationComment.java @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; + +/** + * Comment associated with a document annotation. + */ +@Immutable +public final class DocumentAnnotationComment implements JsonSerializable { + /* + * Comment message in Markdown. + */ + @Generated + private final String message; + + /* + * Author of the comment. + */ + @Generated + private String author; + + /* + * Date and time when the comment was created. + */ + @Generated + private OffsetDateTime createdAt; + + /* + * Date and time when the comment was last modified. + */ + @Generated + private OffsetDateTime lastModifiedAt; + + /* + * Tags associated with the comment. + */ + @Generated + private List tags; + + /** + * Creates an instance of DocumentAnnotationComment class. + * + * @param message the message value to set. 
+ */ + @Generated + private DocumentAnnotationComment(String message) { + this.message = message; + } + + /** + * Get the message property: Comment message in Markdown. + * + * @return the message value. + */ + @Generated + public String getMessage() { + return this.message; + } + + /** + * Get the author property: Author of the comment. + * + * @return the author value. + */ + @Generated + public String getAuthor() { + return this.author; + } + + /** + * Get the createdAt property: Date and time when the comment was created. + * + * @return the createdAt value. + */ + @Generated + public OffsetDateTime getCreatedAt() { + return this.createdAt; + } + + /** + * Get the lastModifiedAt property: Date and time when the comment was last modified. + * + * @return the lastModifiedAt value. + */ + @Generated + public OffsetDateTime getLastModifiedAt() { + return this.lastModifiedAt; + } + + /** + * Get the tags property: Tags associated with the comment. + * + * @return the tags value. + */ + @Generated + public List getTags() { + return this.tags; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("message", this.message); + jsonWriter.writeStringField("author", this.author); + jsonWriter.writeStringField("createdAt", + this.createdAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.createdAt)); + jsonWriter.writeStringField("lastModifiedAt", + this.lastModifiedAt == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModifiedAt)); + jsonWriter.writeArrayField("tags", this.tags, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentAnnotationComment from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of DocumentAnnotationComment if the JsonReader was pointing to an instance of it, or null if + * it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentAnnotationComment. + */ + @Generated + public static DocumentAnnotationComment fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String message = null; + String author = null; + OffsetDateTime createdAt = null; + OffsetDateTime lastModifiedAt = null; + List tags = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("message".equals(fieldName)) { + message = reader.getString(); + } else if ("author".equals(fieldName)) { + author = reader.getString(); + } else if ("createdAt".equals(fieldName)) { + createdAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("lastModifiedAt".equals(fieldName)) { + lastModifiedAt = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("tags".equals(fieldName)) { + tags = reader.readArray(reader1 -> reader1.getString()); + } else { + reader.skipChildren(); + } + } + DocumentAnnotationComment deserializedDocumentAnnotationComment = new DocumentAnnotationComment(message); + deserializedDocumentAnnotationComment.author = author; + deserializedDocumentAnnotationComment.createdAt = createdAt; + deserializedDocumentAnnotationComment.lastModifiedAt = lastModifiedAt; + deserializedDocumentAnnotationComment.tags = tags; + + return deserializedDocumentAnnotationComment; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationKind.java 
// ==== File: DocumentAnnotationKind.java ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.util.ExpandableStringEnum;
import java.util.Collection;

/**
 * Document annotation kind.
 */
public final class DocumentAnnotationKind extends ExpandableStringEnum<DocumentAnnotationKind> {
    /**
     * Highlight annotation.
     */
    @Generated
    public static final DocumentAnnotationKind HIGHLIGHT = fromString("highlight");

    /**
     * Strikethrough annotation.
     */
    @Generated
    public static final DocumentAnnotationKind STRIKETHROUGH = fromString("strikethrough");

    /**
     * Underline annotation.
     */
    @Generated
    public static final DocumentAnnotationKind UNDERLINE = fromString("underline");

    /**
     * Italic annotation.
     */
    @Generated
    public static final DocumentAnnotationKind ITALIC = fromString("italic");

    /**
     * Bold annotation.
     */
    @Generated
    public static final DocumentAnnotationKind BOLD = fromString("bold");

    /**
     * Circle annotation.
     */
    @Generated
    public static final DocumentAnnotationKind CIRCLE = fromString("circle");

    /**
     * Note annotation.
     */
    @Generated
    public static final DocumentAnnotationKind NOTE = fromString("note");

    /**
     * Creates a new instance of DocumentAnnotationKind value.
     *
     * @deprecated Use the {@link #fromString(String)} factory method.
     */
    @Generated
    @Deprecated
    public DocumentAnnotationKind() {
    }

    /**
     * Creates or finds a DocumentAnnotationKind from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding DocumentAnnotationKind.
     */
    @Generated
    public static DocumentAnnotationKind fromString(String name) {
        return fromString(name, DocumentAnnotationKind.class);
    }

    /**
     * Gets known DocumentAnnotationKind values.
     *
     * @return known DocumentAnnotationKind values.
     */
    @Generated
    public static Collection<DocumentAnnotationKind> values() {
        return values(DocumentAnnotationKind.class);
    }
}

// ==== File: DocumentBarcode.java ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * Barcode in a document.
 */
@Immutable
public final class DocumentBarcode implements JsonSerializable<DocumentBarcode> {
    /*
     * Barcode kind.
     */
    @Generated
    private final DocumentBarcodeKind kind;

    /*
     * Barcode value.
     */
    @Generated
    private final String value;

    /*
     * Encoded source that identifies the position of the barcode in the content.
     */
    @Generated
    private String source;

    /*
     * Span of the barcode in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /*
     * Confidence of predicting the barcode.
     */
    @Generated
    private Double confidence;

    /**
     * Creates an instance of DocumentBarcode class.
     *
     * @param kind the kind value to set.
     * @param value the value value to set.
     */
    @Generated
    private DocumentBarcode(DocumentBarcodeKind kind, String value) {
        this.kind = kind;
        this.value = value;
    }

    /**
     * Get the kind property: Barcode kind.
     *
     * @return the kind value.
     */
    @Generated
    public DocumentBarcodeKind getKind() {
        return this.kind;
    }

    /**
     * Get the value property: Barcode value.
     *
     * @return the value value.
     */
    @Generated
    public String getValue() {
        return this.value;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the barcode in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Get the span property: Span of the barcode in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * Get the confidence property: Confidence of predicting the barcode.
     *
     * @return the confidence value.
     */
    @Generated
    public Double getConfidence() {
        return this.confidence;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString());
        jsonWriter.writeStringField("value", this.value);
        jsonWriter.writeStringField("source", this.source);
        jsonWriter.writeJsonField("span", this.span);
        jsonWriter.writeNumberField("confidence", this.confidence);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentBarcode from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentBarcode if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentBarcode.
     */
    @Generated
    public static DocumentBarcode fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            DocumentBarcodeKind kind = null;
            String value = null;
            String source = null;
            ContentSpan span = null;
            Double confidence = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("kind".equals(fieldName)) {
                    kind = DocumentBarcodeKind.fromString(reader.getString());
                } else if ("value".equals(fieldName)) {
                    value = reader.getString();
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else if ("confidence".equals(fieldName)) {
                    confidence = reader.getNullable(JsonReader::getDouble);
                } else {
                    reader.skipChildren();
                }
            }
            DocumentBarcode deserializedDocumentBarcode = new DocumentBarcode(kind, value);
            deserializedDocumentBarcode.source = source;
            deserializedDocumentBarcode.span = span;
            deserializedDocumentBarcode.confidence = confidence;

            return deserializedDocumentBarcode;
        });
    }
}
// ==== File: DocumentBarcodeKind.java ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.util.ExpandableStringEnum;
import java.util.Collection;

/**
 * Barcode kind.
 */
public final class DocumentBarcodeKind extends ExpandableStringEnum<DocumentBarcodeKind> {
    /**
     * QR code, as defined in ISO/IEC 18004:2015.
     */
    @Generated
    public static final DocumentBarcodeKind QRCODE = fromString("QRCode");

    /**
     * PDF417, as defined in ISO 15438.
     */
    @Generated
    public static final DocumentBarcodeKind PDF417 = fromString("PDF417");

    /**
     * GS1 12-digit Universal Product Code.
     */
    @Generated
    public static final DocumentBarcodeKind UPCA = fromString("UPCA");

    /**
     * GS1 6-digit Universal Product Code.
     */
    @Generated
    public static final DocumentBarcodeKind UPCE = fromString("UPCE");

    /**
     * Code 39 barcode, as defined in ISO/IEC 16388:2007.
     */
    @Generated
    public static final DocumentBarcodeKind CODE39 = fromString("Code39");

    /**
     * Code 128 barcode, as defined in ISO/IEC 15417:2007.
     */
    @Generated
    public static final DocumentBarcodeKind CODE128 = fromString("Code128");

    /**
     * GS1 8-digit International Article Number (European Article Number).
     */
    @Generated
    public static final DocumentBarcodeKind EAN8 = fromString("EAN8");

    /**
     * GS1 13-digit International Article Number (European Article Number).
     */
    @Generated
    public static final DocumentBarcodeKind EAN13 = fromString("EAN13");

    /**
     * GS1 DataBar barcode.
     */
    @Generated
    public static final DocumentBarcodeKind DATA_BAR = fromString("DataBar");

    /**
     * Code 93 barcode, as defined in ANSI/AIM BC5-1995.
     */
    @Generated
    public static final DocumentBarcodeKind CODE93 = fromString("Code93");

    /**
     * Codabar barcode, as defined in ANSI/AIM BC3-1995.
     */
    @Generated
    public static final DocumentBarcodeKind CODABAR = fromString("Codabar");

    /**
     * GS1 DataBar Expanded barcode.
     */
    @Generated
    public static final DocumentBarcodeKind DATA_BAR_EXPANDED = fromString("DataBarExpanded");

    /**
     * Interleaved 2 of 5 barcode, as defined in ANSI/AIM BC2-1995.
     */
    @Generated
    public static final DocumentBarcodeKind ITF = fromString("ITF");

    /**
     * Micro QR code, as defined in ISO/IEC 23941:2022.
     */
    @Generated
    public static final DocumentBarcodeKind MICRO_QRCODE = fromString("MicroQRCode");

    /**
     * Aztec code, as defined in ISO/IEC 24778:2008.
     */
    @Generated
    public static final DocumentBarcodeKind AZTEC = fromString("Aztec");

    /**
     * Data matrix code, as defined in ISO/IEC 16022:2006.
     */
    @Generated
    public static final DocumentBarcodeKind DATA_MATRIX = fromString("DataMatrix");

    /**
     * MaxiCode, as defined in ISO/IEC 16023:2000.
     */
    @Generated
    public static final DocumentBarcodeKind MAXI_CODE = fromString("MaxiCode");

    /**
     * Creates a new instance of DocumentBarcodeKind value.
     *
     * @deprecated Use the {@link #fromString(String)} factory method.
     */
    @Generated
    @Deprecated
    public DocumentBarcodeKind() {
    }

    /**
     * Creates or finds a DocumentBarcodeKind from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding DocumentBarcodeKind.
     */
    @Generated
    public static DocumentBarcodeKind fromString(String name) {
        return fromString(name, DocumentBarcodeKind.class);
    }

    /**
     * Gets known DocumentBarcodeKind values.
     *
     * @return known DocumentBarcodeKind values.
     */
    @Generated
    public static Collection<DocumentBarcodeKind> values() {
        return values(DocumentBarcodeKind.class);
    }
}

// ==== File: DocumentCaption.java ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Caption of a table or figure.
 */
@Immutable
public final class DocumentCaption implements JsonSerializable<DocumentCaption> {
    /*
     * Content of the caption.
     */
    @Generated
    private final String content;

    /*
     * Encoded source that identifies the position of the caption in the content.
     */
    @Generated
    private String source;

    /*
     * Span of the caption in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /*
     * Child elements of the caption.
     */
    @Generated
    private List<String> elements;

    /**
     * Creates an instance of DocumentCaption class.
     *
     * @param content the content value to set.
     */
    @Generated
    private DocumentCaption(String content) {
        this.content = content;
    }

    /**
     * Get the content property: Content of the caption.
     *
     * @return the content value.
     */
    @Generated
    public String getContent() {
        return this.content;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the caption in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Get the span property: Span of the caption in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * Get the elements property: Child elements of the caption.
     *
     * @return the elements value.
     */
    @Generated
    public List<String> getElements() {
        return this.elements;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("content", this.content);
        jsonWriter.writeStringField("source", this.source);
        jsonWriter.writeJsonField("span", this.span);
        jsonWriter.writeArrayField("elements", this.elements, (writer, element) -> writer.writeString(element));
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentCaption from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentCaption if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentCaption.
     */
    @Generated
    public static DocumentCaption fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String content = null;
            String source = null;
            ContentSpan span = null;
            List<String> elements = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("content".equals(fieldName)) {
                    content = reader.getString();
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else if ("elements".equals(fieldName)) {
                    elements = reader.readArray(reader1 -> reader1.getString());
                } else {
                    reader.skipChildren();
                }
            }
            DocumentCaption deserializedDocumentCaption = new DocumentCaption(content);
            deserializedDocumentCaption.source = source;
            deserializedDocumentCaption.span = span;
            deserializedDocumentCaption.elements = elements;

            return deserializedDocumentCaption;
        });
    }
}

// ==== File: DocumentChartFigure.java (file header; class body continues below) ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.
package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.core.util.BinaryData;
import com.azure.json.JsonReader;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Figure containing a chart, such as a bar chart, line chart, or pie chart.
 */
@Immutable
public final class DocumentChartFigure extends DocumentFigure {
    /*
     * Figure kind.
     */
    @Generated
    private DocumentFigureKind kind = DocumentFigureKind.CHART;

    /*
     * Chart content represented using [Chart.js config](https://www.chartjs.org/docs/latest/configuration/).
     */
    @Generated
    private final Map<String, BinaryData> content;

    /**
     * Creates an instance of DocumentChartFigure class.
     *
     * @param id the id value to set.
     * @param content the content value to set.
     */
    @Generated
    private DocumentChartFigure(String id, Map<String, BinaryData> content) {
        super(id);
        this.content = content;
    }

    /**
     * Get the kind property: Figure kind.
     *
     * @return the kind value.
     */
    @Generated
    @Override
    public DocumentFigureKind getKind() {
        return this.kind;
    }

    /**
     * Get the content property: Chart content represented using [Chart.js
     * config](https://www.chartjs.org/docs/latest/configuration/).
     *
     * @return the content value.
     */
    @Generated
    public Map<String, BinaryData> getContent() {
        return this.content;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("id", getId());
        jsonWriter.writeStringField("source", getSource());
        jsonWriter.writeJsonField("span", getSpan());
        jsonWriter.writeArrayField("elements", getElements(), (writer, element) -> writer.writeString(element));
        jsonWriter.writeJsonField("caption", getCaption());
        jsonWriter.writeArrayField("footnotes", getFootnotes(), (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("description", getDescription());
        jsonWriter.writeStringField("role", getRole() == null ? null : getRole().toString());
        jsonWriter.writeMapField("content", this.content,
            (writer, element) -> writer.writeUntyped(element == null ? null : element.toObject(Object.class)));
        jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString());
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentChartFigure from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentChartFigure if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentChartFigure.
     */
    @Generated
    public static DocumentChartFigure fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String id = null;
            String source = null;
            ContentSpan span = null;
            List<String> elements = null;
            DocumentCaption caption = null;
            List<DocumentFootnote> footnotes = null;
            String description = null;
            SemanticRole role = null;
            Map<String, BinaryData> content = null;
            DocumentFigureKind kind = DocumentFigureKind.CHART;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("id".equals(fieldName)) {
                    id = reader.getString();
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else if ("elements".equals(fieldName)) {
                    elements = reader.readArray(reader1 -> reader1.getString());
                } else if ("caption".equals(fieldName)) {
                    caption = DocumentCaption.fromJson(reader);
                } else if ("footnotes".equals(fieldName)) {
                    footnotes = reader.readArray(reader1 -> DocumentFootnote.fromJson(reader1));
                } else if ("description".equals(fieldName)) {
                    description = reader.getString();
                } else if ("role".equals(fieldName)) {
                    role = SemanticRole.fromString(reader.getString());
                } else if ("content".equals(fieldName)) {
                    content = reader.readMap(reader1 -> reader1
                        .getNullable(nonNullReader -> BinaryData.fromObject(nonNullReader.readUntyped())));
                } else if ("kind".equals(fieldName)) {
                    kind = DocumentFigureKind.fromString(reader.getString());
                } else {
                    reader.skipChildren();
                }
            }
            DocumentChartFigure deserializedDocumentChartFigure = new DocumentChartFigure(id, content);
            deserializedDocumentChartFigure.setSource(source);
            deserializedDocumentChartFigure.setSpan(span);
            deserializedDocumentChartFigure.setElements(elements);
            deserializedDocumentChartFigure.setCaption(caption);
            deserializedDocumentChartFigure.setFootnotes(footnotes);
            deserializedDocumentChartFigure.setDescription(description);
            deserializedDocumentChartFigure.setRole(role);
            deserializedDocumentChartFigure.kind = kind;

            return deserializedDocumentChartFigure;
        });
    }
}

// ==== File: DocumentContent.java ====
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Document content. Ex. text/plain, application/pdf, image/jpeg.
 */
@Immutable
public final class DocumentContent extends MediaContent {
    /*
     * Content kind.
     */
    @Generated
    private MediaContentKind kind = MediaContentKind.DOCUMENT;

    /*
     * Start page number (1-indexed) of the content.
     */
    @Generated
    private final int startPageNumber;

    /*
     * End page number (1-indexed) of the content.
     */
    @Generated
    private final int endPageNumber;

    /*
     * Length unit used by the width, height, and source properties.
     * For images/tiff, the default unit is pixel. For PDF, the default unit is inch.
     */
    @Generated
    private LengthUnit unit;

    /*
     * List of pages in the document.
     */
    @Generated
    private List<DocumentPage> pages;

    /*
     * List of paragraphs in the document. Only if enableOcr and returnDetails are true.
     */
    @Generated
    private List<DocumentParagraph> paragraphs;

    /*
     * List of sections in the document. Only if enableLayout and returnDetails are true.
     */
    @Generated
    private List<DocumentSection> sections;

    /*
     * List of tables in the document. Only if enableLayout and returnDetails are true.
     */
    @Generated
    private List<DocumentTable> tables;

    /*
     * List of figures in the document. Only if enableLayout and returnDetails are true.
     */
    @Generated
    private List<DocumentFigure> figures;

    /*
     * List of annotations in the document. Only if enableAnnotations and returnDetails are true.
     */
    @Generated
    private List<DocumentAnnotation> annotations;

    /*
     * List of hyperlinks in the document. Only if returnDetails are true.
     */
    @Generated
    private List<DocumentHyperlink> hyperlinks;

    /*
     * List of detected content segments. Only if enableSegment is true.
     */
    @Generated
    private List<DocumentContentSegment> segments;

    /**
     * Creates an instance of DocumentContent class.
     *
     * @param mimeType the mimeType value to set.
     * @param startPageNumber the startPageNumber value to set.
     * @param endPageNumber the endPageNumber value to set.
     */
    @Generated
    private DocumentContent(String mimeType, int startPageNumber, int endPageNumber) {
        super(mimeType);
        this.startPageNumber = startPageNumber;
        this.endPageNumber = endPageNumber;
    }

    /**
     * Get the kind property: Content kind.
     *
     * @return the kind value.
     */
    @Generated
    @Override
    public MediaContentKind getKind() {
        return this.kind;
    }

    /**
     * Get the startPageNumber property: Start page number (1-indexed) of the content.
     *
     * @return the startPageNumber value.
     */
    @Generated
    public int getStartPageNumber() {
        return this.startPageNumber;
    }

    /**
     * Get the endPageNumber property: End page number (1-indexed) of the content.
     *
     * @return the endPageNumber value.
     */
    @Generated
    public int getEndPageNumber() {
        return this.endPageNumber;
    }

    /**
     * Get the unit property: Length unit used by the width, height, and source properties.
     * For images/tiff, the default unit is pixel. For PDF, the default unit is inch.
     *
     * @return the unit value.
     */
    @Generated
    public LengthUnit getUnit() {
        return this.unit;
    }

    /**
     * Get the pages property: List of pages in the document.
     *
     * @return the pages value.
     */
    @Generated
    public List<DocumentPage> getPages() {
        return this.pages;
    }

    /**
     * Get the paragraphs property: List of paragraphs in the document. Only if enableOcr and returnDetails are true.
     *
     * @return the paragraphs value.
     */
    @Generated
    public List<DocumentParagraph> getParagraphs() {
        return this.paragraphs;
    }

    /**
     * Get the sections property: List of sections in the document. Only if enableLayout and returnDetails are true.
     *
     * @return the sections value.
     */
    @Generated
    public List<DocumentSection> getSections() {
        return this.sections;
    }

    /**
     * Get the tables property: List of tables in the document. Only if enableLayout and returnDetails are true.
     *
     * @return the tables value.
     */
    @Generated
    public List<DocumentTable> getTables() {
        return this.tables;
    }

    /**
     * Get the figures property: List of figures in the document. Only if enableLayout and returnDetails are true.
     *
     * @return the figures value.
     */
    @Generated
    public List<DocumentFigure> getFigures() {
        return this.figures;
    }

    /**
     * Get the annotations property: List of annotations in the document. Only if enableAnnotations and returnDetails
     * are true.
     *
     * @return the annotations value.
     */
    @Generated
    public List<DocumentAnnotation> getAnnotations() {
        return this.annotations;
    }

    /**
     * Get the hyperlinks property: List of hyperlinks in the document. Only if returnDetails are true.
     *
     * @return the hyperlinks value.
     */
    @Generated
    public List<DocumentHyperlink> getHyperlinks() {
        return this.hyperlinks;
    }

    /**
     * Get the segments property: List of detected content segments. Only if enableSegment is true.
     *
     * @return the segments value.
     */
    @Generated
    public List<DocumentContentSegment> getSegments() {
        return this.segments;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("mimeType", getMimeType());
        jsonWriter.writeStringField("analyzerId", getAnalyzerId());
        jsonWriter.writeStringField("category", getCategory());
        jsonWriter.writeStringField("path", getPath());
        jsonWriter.writeStringField("markdown", getMarkdown());
        jsonWriter.writeMapField("fields", getFields(), (writer, element) -> writer.writeJson(element));
        jsonWriter.writeIntField("startPageNumber", this.startPageNumber);
        jsonWriter.writeIntField("endPageNumber", this.endPageNumber);
        jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString());
        jsonWriter.writeStringField("unit", this.unit == null ? null : this.unit.toString());
        jsonWriter.writeArrayField("pages", this.pages, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("paragraphs", this.paragraphs, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("sections", this.sections, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("tables", this.tables, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("figures", this.figures, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("annotations", this.annotations, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("hyperlinks", this.hyperlinks, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("segments", this.segments, (writer, element) -> writer.writeJson(element));
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentContent from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentContent if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentContent.
     */
    @Generated
    public static DocumentContent fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String mimeType = null;
            String analyzerId = null;
            String category = null;
            String path = null;
            String markdown = null;
            Map<String, ContentField> fields = null;
            int startPageNumber = 0;
            int endPageNumber = 0;
            MediaContentKind kind = MediaContentKind.DOCUMENT;
            LengthUnit unit = null;
            List<DocumentPage> pages = null;
            List<DocumentParagraph> paragraphs = null;
            List<DocumentSection> sections = null;
            List<DocumentTable> tables = null;
            List<DocumentFigure> figures = null;
            List<DocumentAnnotation> annotations = null;
            List<DocumentHyperlink> hyperlinks = null;
            List<DocumentContentSegment> segments = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("mimeType".equals(fieldName)) {
                    mimeType = reader.getString();
                } else if ("analyzerId".equals(fieldName)) {
                    analyzerId = reader.getString();
                } else if ("category".equals(fieldName)) {
                    category = reader.getString();
                } else if ("path".equals(fieldName)) {
                    path = reader.getString();
                } else if ("markdown".equals(fieldName)) {
                    markdown = reader.getString();
                } else if ("fields".equals(fieldName)) {
                    fields = reader.readMap(reader1 -> ContentField.fromJson(reader1));
                } else if ("startPageNumber".equals(fieldName)) {
                    startPageNumber = reader.getInt();
                } else if ("endPageNumber".equals(fieldName)) {
                    endPageNumber = reader.getInt();
                } else if ("kind".equals(fieldName)) {
                    kind = MediaContentKind.fromString(reader.getString());
                } else if ("unit".equals(fieldName)) {
                    unit = LengthUnit.fromString(reader.getString());
                } else if ("pages".equals(fieldName)) {
                    pages = reader.readArray(reader1 -> DocumentPage.fromJson(reader1));
                } else if ("paragraphs".equals(fieldName)) {
                    paragraphs = reader.readArray(reader1 -> DocumentParagraph.fromJson(reader1));
                } else if ("sections".equals(fieldName)) {
                    sections = reader.readArray(reader1 -> DocumentSection.fromJson(reader1));
                } else if ("tables".equals(fieldName)) {
                    tables = reader.readArray(reader1 -> DocumentTable.fromJson(reader1));
                } else if ("figures".equals(fieldName)) {
                    figures = reader.readArray(reader1 -> DocumentFigure.fromJson(reader1));
                } else if ("annotations".equals(fieldName)) {
                    annotations = reader.readArray(reader1 -> DocumentAnnotation.fromJson(reader1));
                } else if ("hyperlinks".equals(fieldName)) {
                    hyperlinks = reader.readArray(reader1 -> DocumentHyperlink.fromJson(reader1));
                } else if ("segments".equals(fieldName)) {
                    segments = reader.readArray(reader1 -> DocumentContentSegment.fromJson(reader1));
                } else {
                    reader.skipChildren();
                }
            }
            DocumentContent deserializedDocumentContent = new DocumentContent(mimeType, startPageNumber, endPageNumber);
            deserializedDocumentContent.setAnalyzerId(analyzerId);
            deserializedDocumentContent.setCategory(category);
            deserializedDocumentContent.setPath(path);
            deserializedDocumentContent.setMarkdown(markdown);
            deserializedDocumentContent.setFields(fields);
            deserializedDocumentContent.kind = kind;
            deserializedDocumentContent.unit = unit;
            deserializedDocumentContent.pages = pages;
            deserializedDocumentContent.paragraphs = paragraphs;
            deserializedDocumentContent.sections = sections;
            deserializedDocumentContent.tables = tables;
            deserializedDocumentContent.figures = figures;
            deserializedDocumentContent.annotations = annotations;
            deserializedDocumentContent.hyperlinks = hyperlinks;
            deserializedDocumentContent.segments = segments;

            return deserializedDocumentContent;
        });
    }
}
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentContentSegment.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Detected document content segment. + */ +@Immutable +public final class DocumentContentSegment implements JsonSerializable { + /* + * Segment identifier. + */ + @Generated + private final String segmentId; + + /* + * Classified content category. + */ + @Generated + private final String category; + + /* + * Span of the segment in the markdown content. + */ + @Generated + private final ContentSpan span; + + /* + * Start page number (1-indexed) of the segment. + */ + @Generated + private final int startPageNumber; + + /* + * End page number (1-indexed) of the segment. + */ + @Generated + private final int endPageNumber; + + /** + * Creates an instance of DocumentContentSegment class. + * + * @param segmentId the segmentId value to set. + * @param category the category value to set. + * @param span the span value to set. + * @param startPageNumber the startPageNumber value to set. + * @param endPageNumber the endPageNumber value to set. + */ + @Generated + private DocumentContentSegment(String segmentId, String category, ContentSpan span, int startPageNumber, + int endPageNumber) { + this.segmentId = segmentId; + this.category = category; + this.span = span; + this.startPageNumber = startPageNumber; + this.endPageNumber = endPageNumber; + } + + /** + * Get the segmentId property: Segment identifier. 
+ * + * @return the segmentId value. + */ + @Generated + public String getSegmentId() { + return this.segmentId; + } + + /** + * Get the category property: Classified content category. + * + * @return the category value. + */ + @Generated + public String getCategory() { + return this.category; + } + + /** + * Get the span property: Span of the segment in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the startPageNumber property: Start page number (1-indexed) of the segment. + * + * @return the startPageNumber value. + */ + @Generated + public int getStartPageNumber() { + return this.startPageNumber; + } + + /** + * Get the endPageNumber property: End page number (1-indexed) of the segment. + * + * @return the endPageNumber value. + */ + @Generated + public int getEndPageNumber() { + return this.endPageNumber; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("segmentId", this.segmentId); + jsonWriter.writeStringField("category", this.category); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeIntField("startPageNumber", this.startPageNumber); + jsonWriter.writeIntField("endPageNumber", this.endPageNumber); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentContentSegment from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentContentSegment if the JsonReader was pointing to an instance of it, or null if it + * was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentContentSegment. 
+ */ + @Generated + public static DocumentContentSegment fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String segmentId = null; + String category = null; + ContentSpan span = null; + int startPageNumber = 0; + int endPageNumber = 0; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("segmentId".equals(fieldName)) { + segmentId = reader.getString(); + } else if ("category".equals(fieldName)) { + category = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("startPageNumber".equals(fieldName)) { + startPageNumber = reader.getInt(); + } else if ("endPageNumber".equals(fieldName)) { + endPageNumber = reader.getInt(); + } else { + reader.skipChildren(); + } + } + return new DocumentContentSegment(segmentId, category, span, startPageNumber, endPageNumber); + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigure.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigure.java new file mode 100644 index 000000000000..f3bc9b9d3f16 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigure.java @@ -0,0 +1,366 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Figure in a document. 
+ */ +@Immutable +public class DocumentFigure implements JsonSerializable { + /* + * Figure kind. + */ + @Generated + private DocumentFigureKind kind = DocumentFigureKind.fromString("DocumentFigure"); + + /* + * Figure identifier. + */ + @Generated + private final String id; + + /* + * Encoded source that identifies the position of the figure in the content. + */ + @Generated + private String source; + + /* + * Span of the figure in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Child elements of the figure, excluding any caption or footnotes. + */ + @Generated + private List elements; + + /* + * Figure caption. + */ + @Generated + private DocumentCaption caption; + + /* + * List of figure footnotes. + */ + @Generated + private List footnotes; + + /* + * Description of the figure. + */ + @Generated + private String description; + + /* + * Semantic role of the figure. + */ + @Generated + private SemanticRole role; + + /** + * Creates an instance of DocumentFigure class. + * + * @param id the id value to set. + */ + @Generated + protected DocumentFigure(String id) { + this.id = id; + } + + /** + * Get the kind property: Figure kind. + * + * @return the kind value. + */ + @Generated + public DocumentFigureKind getKind() { + return this.kind; + } + + /** + * Get the id property: Figure identifier. + * + * @return the id value. + */ + @Generated + public String getId() { + return this.id; + } + + /** + * Get the source property: Encoded source that identifies the position of the figure in the content. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Set the source property: Encoded source that identifies the position of the figure in the content. + * + * @param source the source value to set. + * @return the DocumentFigure object itself. 
+ */ + @Generated + DocumentFigure setSource(String source) { + this.source = source; + return this; + } + + /** + * Get the span property: Span of the figure in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Set the span property: Span of the figure in the markdown content. + * + * @param span the span value to set. + * @return the DocumentFigure object itself. + */ + @Generated + DocumentFigure setSpan(ContentSpan span) { + this.span = span; + return this; + } + + /** + * Get the elements property: Child elements of the figure, excluding any caption or footnotes. + * + * @return the elements value. + */ + @Generated + public List getElements() { + return this.elements; + } + + /** + * Set the elements property: Child elements of the figure, excluding any caption or footnotes. + * + * @param elements the elements value to set. + * @return the DocumentFigure object itself. + */ + @Generated + DocumentFigure setElements(List elements) { + this.elements = elements; + return this; + } + + /** + * Get the caption property: Figure caption. + * + * @return the caption value. + */ + @Generated + public DocumentCaption getCaption() { + return this.caption; + } + + /** + * Set the caption property: Figure caption. + * + * @param caption the caption value to set. + * @return the DocumentFigure object itself. + */ + @Generated + DocumentFigure setCaption(DocumentCaption caption) { + this.caption = caption; + return this; + } + + /** + * Get the footnotes property: List of figure footnotes. + * + * @return the footnotes value. + */ + @Generated + public List getFootnotes() { + return this.footnotes; + } + + /** + * Set the footnotes property: List of figure footnotes. + * + * @param footnotes the footnotes value to set. + * @return the DocumentFigure object itself. 
+ */ + @Generated + DocumentFigure setFootnotes(List footnotes) { + this.footnotes = footnotes; + return this; + } + + /** + * Get the description property: Description of the figure. + * + * @return the description value. + */ + @Generated + public String getDescription() { + return this.description; + } + + /** + * Set the description property: Description of the figure. + * + * @param description the description value to set. + * @return the DocumentFigure object itself. + */ + @Generated + DocumentFigure setDescription(String description) { + this.description = description; + return this; + } + + /** + * Get the role property: Semantic role of the figure. + * + * @return the role value. + */ + @Generated + public SemanticRole getRole() { + return this.role; + } + + /** + * Set the role property: Semantic role of the figure. + * + * @param role the role value to set. + * @return the DocumentFigure object itself. + */ + @Generated + DocumentFigure setRole(SemanticRole role) { + this.role = role; + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("id", this.id); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeArrayField("elements", this.elements, (writer, element) -> writer.writeString(element)); + jsonWriter.writeJsonField("caption", this.caption); + jsonWriter.writeArrayField("footnotes", this.footnotes, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("description", this.description); + jsonWriter.writeStringField("role", this.role == null ? null : this.role.toString()); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentFigure from the JsonReader. 
+ * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentFigure if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentFigure. + */ + @Generated + public static DocumentFigure fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String discriminatorValue = null; + try (JsonReader readerToUse = reader.bufferObject()) { + readerToUse.nextToken(); // Prepare for reading + while (readerToUse.nextToken() != JsonToken.END_OBJECT) { + String fieldName = readerToUse.getFieldName(); + readerToUse.nextToken(); + if ("kind".equals(fieldName)) { + discriminatorValue = readerToUse.getString(); + break; + } else { + readerToUse.skipChildren(); + } + } + // Use the discriminator value to determine which subtype should be deserialized. 
+ if ("chart".equals(discriminatorValue)) { + return DocumentChartFigure.fromJson(readerToUse.reset()); + } else if ("mermaid".equals(discriminatorValue)) { + return DocumentMermaidFigure.fromJson(readerToUse.reset()); + } else { + return fromJsonKnownDiscriminator(readerToUse.reset()); + } + } + }); + } + + @Generated + static DocumentFigure fromJsonKnownDiscriminator(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String id = null; + DocumentFigureKind kind = null; + String source = null; + ContentSpan span = null; + List elements = null; + DocumentCaption caption = null; + List footnotes = null; + String description = null; + SemanticRole role = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("id".equals(fieldName)) { + id = reader.getString(); + } else if ("kind".equals(fieldName)) { + kind = DocumentFigureKind.fromString(reader.getString()); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("elements".equals(fieldName)) { + elements = reader.readArray(reader1 -> reader1.getString()); + } else if ("caption".equals(fieldName)) { + caption = DocumentCaption.fromJson(reader); + } else if ("footnotes".equals(fieldName)) { + footnotes = reader.readArray(reader1 -> DocumentFootnote.fromJson(reader1)); + } else if ("description".equals(fieldName)) { + description = reader.getString(); + } else if ("role".equals(fieldName)) { + role = SemanticRole.fromString(reader.getString()); + } else { + reader.skipChildren(); + } + } + DocumentFigure deserializedDocumentFigure = new DocumentFigure(id); + deserializedDocumentFigure.kind = kind; + deserializedDocumentFigure.source = source; + deserializedDocumentFigure.span = span; + deserializedDocumentFigure.elements = elements; + deserializedDocumentFigure.caption = caption; + 
deserializedDocumentFigure.footnotes = footnotes; + deserializedDocumentFigure.description = description; + deserializedDocumentFigure.role = role; + + return deserializedDocumentFigure; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigureKind.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigureKind.java new file mode 100644 index 000000000000..cd4c36ddfcef --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigureKind.java @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Figure kind. + */ +public final class DocumentFigureKind extends ExpandableStringEnum { + /** + * Unknown figure kind. + */ + @Generated + public static final DocumentFigureKind UNKNOWN = fromString("unknown"); + + /** + * Figure containing a chart, such as a bar chart, line chart, or pie chart. + */ + @Generated + public static final DocumentFigureKind CHART = fromString("chart"); + + /** + * Figure containing a diagram, such as a flowchart or network diagram. + */ + @Generated + public static final DocumentFigureKind MERMAID = fromString("mermaid"); + + /** + * Creates a new instance of DocumentFigureKind value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public DocumentFigureKind() { + } + + /** + * Creates or finds a DocumentFigureKind from its string representation. + * + * @param name a name to look for. + * @return the corresponding DocumentFigureKind. 
+ */ + @Generated + public static DocumentFigureKind fromString(String name) { + return fromString(name, DocumentFigureKind.class); + } + + /** + * Gets known DocumentFigureKind values. + * + * @return known DocumentFigureKind values. + */ + @Generated + public static Collection values() { + return values(DocumentFigureKind.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFootnote.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFootnote.java new file mode 100644 index 000000000000..0cf79743fec2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFootnote.java @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Footnote of a table or figure. + */ +@Immutable +public final class DocumentFootnote implements JsonSerializable { + /* + * Content of the footnote. + */ + @Generated + private final String content; + + /* + * Encoded source that identifies the position of the footnote in the content. + */ + @Generated + private String source; + + /* + * Span of the footnote in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Child elements of the footnote. + */ + @Generated + private List elements; + + /** + * Creates an instance of DocumentFootnote class. + * + * @param content the content value to set. 
+ */ + @Generated + private DocumentFootnote(String content) { + this.content = content; + } + + /** + * Get the content property: Content of the footnote. + * + * @return the content value. + */ + @Generated + public String getContent() { + return this.content; + } + + /** + * Get the source property: Encoded source that identifies the position of the footnote in the content. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the span property: Span of the footnote in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the elements property: Child elements of the footnote. + * + * @return the elements value. + */ + @Generated + public List getElements() { + return this.elements; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("content", this.content); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeArrayField("elements", this.elements, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentFootnote from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentFootnote if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentFootnote. 
+ */ + @Generated + public static DocumentFootnote fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String content = null; + String source = null; + ContentSpan span = null; + List elements = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("content".equals(fieldName)) { + content = reader.getString(); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("elements".equals(fieldName)) { + elements = reader.readArray(reader1 -> reader1.getString()); + } else { + reader.skipChildren(); + } + } + DocumentFootnote deserializedDocumentFootnote = new DocumentFootnote(content); + deserializedDocumentFootnote.source = source; + deserializedDocumentFootnote.span = span; + deserializedDocumentFootnote.elements = elements; + + return deserializedDocumentFootnote; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormula.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormula.java new file mode 100644 index 000000000000..f7a4bd820b58 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormula.java @@ -0,0 +1,170 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Mathematical formula in a document. + */ +@Immutable +public final class DocumentFormula implements JsonSerializable { + /* + * Formula kind. + */ + @Generated + private final DocumentFormulaKind kind; + + /* + * LaTeX expression describing the formula. + */ + @Generated + private final String value; + + /* + * Encoded source that identifies the position of the formula in the content. + */ + @Generated + private String source; + + /* + * Span of the formula in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Confidence of predicting the formula. + */ + @Generated + private Double confidence; + + /** + * Creates an instance of DocumentFormula class. + * + * @param kind the kind value to set. + * @param value the value value to set. + */ + @Generated + private DocumentFormula(DocumentFormulaKind kind, String value) { + this.kind = kind; + this.value = value; + } + + /** + * Get the kind property: Formula kind. + * + * @return the kind value. + */ + @Generated + public DocumentFormulaKind getKind() { + return this.kind; + } + + /** + * Get the value property: LaTeX expression describing the formula. + * + * @return the value value. + */ + @Generated + public String getValue() { + return this.value; + } + + /** + * Get the source property: Encoded source that identifies the position of the formula in the content. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the span property: Span of the formula in the markdown content. + * + * @return the span value. 
+ */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the confidence property: Confidence of predicting the formula. + * + * @return the confidence value. + */ + @Generated + public Double getConfidence() { + return this.confidence; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + jsonWriter.writeStringField("value", this.value); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeNumberField("confidence", this.confidence); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentFormula from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentFormula if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentFormula. 
+ */ + @Generated + public static DocumentFormula fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DocumentFormulaKind kind = null; + String value = null; + String source = null; + ContentSpan span = null; + Double confidence = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("kind".equals(fieldName)) { + kind = DocumentFormulaKind.fromString(reader.getString()); + } else if ("value".equals(fieldName)) { + value = reader.getString(); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("confidence".equals(fieldName)) { + confidence = reader.getNullable(JsonReader::getDouble); + } else { + reader.skipChildren(); + } + } + DocumentFormula deserializedDocumentFormula = new DocumentFormula(kind, value); + deserializedDocumentFormula.source = source; + deserializedDocumentFormula.span = span; + deserializedDocumentFormula.confidence = confidence; + + return deserializedDocumentFormula; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormulaKind.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormulaKind.java new file mode 100644 index 000000000000..f83aea97ef54 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormulaKind.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Formula kind. + */ +public final class DocumentFormulaKind extends ExpandableStringEnum { + /** + * A formula embedded within the content of a paragraph. + */ + @Generated + public static final DocumentFormulaKind INLINE = fromString("inline"); + + /** + * A formula in display mode that takes up an entire line. + */ + @Generated + public static final DocumentFormulaKind DISPLAY = fromString("display"); + + /** + * Creates a new instance of DocumentFormulaKind value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public DocumentFormulaKind() { + } + + /** + * Creates or finds a DocumentFormulaKind from its string representation. + * + * @param name a name to look for. + * @return the corresponding DocumentFormulaKind. + */ + @Generated + public static DocumentFormulaKind fromString(String name) { + return fromString(name, DocumentFormulaKind.class); + } + + /** + * Gets known DocumentFormulaKind values. + * + * @return known DocumentFormulaKind values. + */ + @Generated + public static Collection values() { + return values(DocumentFormulaKind.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentHyperlink.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentHyperlink.java new file mode 100644 index 000000000000..5e65e6ea4fd0 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentHyperlink.java @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Hyperlink in a document, such as a link to a web page or an email address. + */ +@Immutable +public final class DocumentHyperlink implements JsonSerializable { + /* + * Hyperlinked content. + */ + @Generated + private final String content; + + /* + * URL of the hyperlink. + */ + @Generated + private final String url; + + /* + * Span of the hyperlink in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Position of the hyperlink. + */ + @Generated + private String source; + + /** + * Creates an instance of DocumentHyperlink class. + * + * @param content the content value to set. + * @param url the url value to set. + */ + @Generated + private DocumentHyperlink(String content, String url) { + this.content = content; + this.url = url; + } + + /** + * Get the content property: Hyperlinked content. + * + * @return the content value. + */ + @Generated + public String getContent() { + return this.content; + } + + /** + * Get the url property: URL of the hyperlink. + * + * @return the url value. + */ + @Generated + public String getUrl() { + return this.url; + } + + /** + * Get the span property: Span of the hyperlink in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the source property: Position of the hyperlink. + * + * @return the source value. 
+ */ + @Generated + public String getSource() { + return this.source; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("content", this.content); + jsonWriter.writeStringField("url", this.url); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeStringField("source", this.source); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentHyperlink from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentHyperlink if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentHyperlink. + */ + @Generated + public static DocumentHyperlink fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String content = null; + String url = null; + ContentSpan span = null; + String source = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("content".equals(fieldName)) { + content = reader.getString(); + } else if ("url".equals(fieldName)) { + url = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else { + reader.skipChildren(); + } + } + DocumentHyperlink deserializedDocumentHyperlink = new DocumentHyperlink(content, url); + deserializedDocumentHyperlink.span = span; + deserializedDocumentHyperlink.source = source; + + return deserializedDocumentHyperlink; + }); + } +} diff --git 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * Line in a document, consisting of a contiguous sequence of words.
 */
@Immutable
public final class DocumentLine implements JsonSerializable<DocumentLine> {
    /*
     * Line text.
     */
    @Generated
    private final String content;

    /*
     * Encoded source that identifies the position of the line in the content.
     */
    @Generated
    private String source;

    /*
     * Span of the line in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /**
     * Creates an instance of DocumentLine class.
     *
     * @param content the content value to set.
     */
    @Generated
    private DocumentLine(String content) {
        this.content = content;
    }

    /**
     * Get the content property: Line text.
     *
     * @return the content value.
     */
    @Generated
    public String getContent() {
        return this.content;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the line in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Get the span property: Span of the line in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("content", this.content);
        jsonWriter.writeStringField("source", this.source);
        jsonWriter.writeJsonField("span", this.span);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentLine from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentLine if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentLine.
     */
    @Generated
    public static DocumentLine fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String content = null;
            String source = null;
            ContentSpan span = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("content".equals(fieldName)) {
                    content = reader.getString();
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else {
                    reader.skipChildren();
                }
            }
            DocumentLine deserializedDocumentLine = new DocumentLine(content);
            deserializedDocumentLine.source = source;
            deserializedDocumentLine.span = span;

            return deserializedDocumentLine;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Figure containing a diagram, such as a flowchart or network diagram.
 */
@Immutable
public final class DocumentMermaidFigure extends DocumentFigure {
    /*
     * Figure kind.
     */
    @Generated
    private DocumentFigureKind kind = DocumentFigureKind.MERMAID;

    /*
     * Diagram content represented using [Mermaid syntax](https://mermaid.js.org/intro/).
     */
    @Generated
    private final String content;

    /**
     * Creates an instance of DocumentMermaidFigure class.
     *
     * @param id the id value to set.
     * @param content the content value to set.
     */
    @Generated
    private DocumentMermaidFigure(String id, String content) {
        super(id);
        this.content = content;
    }

    /**
     * Get the kind property: Figure kind.
     *
     * @return the kind value.
     */
    @Generated
    @Override
    public DocumentFigureKind getKind() {
        return this.kind;
    }

    /**
     * Get the content property: Diagram content represented using [Mermaid syntax](https://mermaid.js.org/intro/).
     *
     * @return the content value.
     */
    @Generated
    public String getContent() {
        return this.content;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("id", getId());
        jsonWriter.writeStringField("source", getSource());
        jsonWriter.writeJsonField("span", getSpan());
        jsonWriter.writeArrayField("elements", getElements(), (writer, element) -> writer.writeString(element));
        jsonWriter.writeJsonField("caption", getCaption());
        jsonWriter.writeArrayField("footnotes", getFootnotes(), (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("description", getDescription());
        jsonWriter.writeStringField("role", getRole() == null ? null : getRole().toString());
        jsonWriter.writeStringField("content", this.content);
        jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString());
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentMermaidFigure from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentMermaidFigure if the JsonReader was pointing to an instance of it, or null if it
     * was pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentMermaidFigure.
     */
    @Generated
    public static DocumentMermaidFigure fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String id = null;
            String source = null;
            ContentSpan span = null;
            List<String> elements = null;
            DocumentCaption caption = null;
            List<DocumentFootnote> footnotes = null;
            String description = null;
            SemanticRole role = null;
            String content = null;
            DocumentFigureKind kind = DocumentFigureKind.MERMAID;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("id".equals(fieldName)) {
                    id = reader.getString();
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else if ("elements".equals(fieldName)) {
                    elements = reader.readArray(reader1 -> reader1.getString());
                } else if ("caption".equals(fieldName)) {
                    caption = DocumentCaption.fromJson(reader);
                } else if ("footnotes".equals(fieldName)) {
                    footnotes = reader.readArray(reader1 -> DocumentFootnote.fromJson(reader1));
                } else if ("description".equals(fieldName)) {
                    description = reader.getString();
                } else if ("role".equals(fieldName)) {
                    role = SemanticRole.fromString(reader.getString());
                } else if ("content".equals(fieldName)) {
                    content = reader.getString();
                } else if ("kind".equals(fieldName)) {
                    kind = DocumentFigureKind.fromString(reader.getString());
                } else {
                    reader.skipChildren();
                }
            }
            DocumentMermaidFigure deserializedDocumentMermaidFigure = new DocumentMermaidFigure(id, content);
            deserializedDocumentMermaidFigure.setSource(source);
            deserializedDocumentMermaidFigure.setSpan(span);
            deserializedDocumentMermaidFigure.setElements(elements);
            deserializedDocumentMermaidFigure.setCaption(caption);
            deserializedDocumentMermaidFigure.setFootnotes(footnotes);
            deserializedDocumentMermaidFigure.setDescription(description);
            deserializedDocumentMermaidFigure.setRole(role);
            deserializedDocumentMermaidFigure.kind = kind;

            return deserializedDocumentMermaidFigure;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Content from a document page.
 */
@Immutable
public final class DocumentPage implements JsonSerializable<DocumentPage> {
    /*
     * Page number (1-based).
     */
    @Generated
    private final int pageNumber;

    /*
     * Width of the page.
     */
    @Generated
    private Double width;

    /*
     * Height of the page.
     */
    @Generated
    private Double height;

    /*
     * Span(s) associated with the page in the markdown content.
     */
    @Generated
    private List<ContentSpan> spans;

    /*
     * The general orientation of the content in clockwise direction,
     * measured in degrees between (-180, 180].
     * Only if enableOcr is true.
     */
    @Generated
    private Double angle;

    /*
     * List of words in the page. Only if enableOcr and returnDetails are true.
     */
    @Generated
    private List<DocumentWord> words;

    /*
     * List of lines in the page. Only if enableOcr and returnDetails are true.
     */
    @Generated
    private List<DocumentLine> lines;

    /*
     * List of barcodes in the page. Only if enableBarcode and returnDetails are true.
     */
    @Generated
    private List<DocumentBarcode> barcodes;

    /*
     * List of mathematical formulas in the page. Only if enableFormula and returnDetails are true.
     */
    @Generated
    private List<DocumentFormula> formulas;

    /**
     * Creates an instance of DocumentPage class.
     *
     * @param pageNumber the pageNumber value to set.
     */
    @Generated
    private DocumentPage(int pageNumber) {
        this.pageNumber = pageNumber;
    }

    /**
     * Get the pageNumber property: Page number (1-based).
     *
     * @return the pageNumber value.
     */
    @Generated
    public int getPageNumber() {
        return this.pageNumber;
    }

    /**
     * Get the width property: Width of the page.
     *
     * @return the width value.
     */
    @Generated
    public Double getWidth() {
        return this.width;
    }

    /**
     * Get the height property: Height of the page.
     *
     * @return the height value.
     */
    @Generated
    public Double getHeight() {
        return this.height;
    }

    /**
     * Get the spans property: Span(s) associated with the page in the markdown content.
     *
     * @return the spans value.
     */
    @Generated
    public List<ContentSpan> getSpans() {
        return this.spans;
    }

    /**
     * Get the angle property: The general orientation of the content in clockwise direction,
     * measured in degrees between (-180, 180].
     * Only if enableOcr is true.
     *
     * @return the angle value.
     */
    @Generated
    public Double getAngle() {
        return this.angle;
    }

    /**
     * Get the words property: List of words in the page. Only if enableOcr and returnDetails are true.
     *
     * @return the words value.
     */
    @Generated
    public List<DocumentWord> getWords() {
        return this.words;
    }

    /**
     * Get the lines property: List of lines in the page. Only if enableOcr and returnDetails are true.
     *
     * @return the lines value.
     */
    @Generated
    public List<DocumentLine> getLines() {
        return this.lines;
    }

    /**
     * Get the barcodes property: List of barcodes in the page. Only if enableBarcode and returnDetails are true.
     *
     * @return the barcodes value.
     */
    @Generated
    public List<DocumentBarcode> getBarcodes() {
        return this.barcodes;
    }

    /**
     * Get the formulas property: List of mathematical formulas in the page. Only if enableFormula and returnDetails are
     * true.
     *
     * @return the formulas value.
     */
    @Generated
    public List<DocumentFormula> getFormulas() {
        return this.formulas;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeIntField("pageNumber", this.pageNumber);
        jsonWriter.writeNumberField("width", this.width);
        jsonWriter.writeNumberField("height", this.height);
        jsonWriter.writeArrayField("spans", this.spans, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeNumberField("angle", this.angle);
        jsonWriter.writeArrayField("words", this.words, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("lines", this.lines, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("barcodes", this.barcodes, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeArrayField("formulas", this.formulas, (writer, element) -> writer.writeJson(element));
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentPage from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentPage if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentPage.
     */
    @Generated
    public static DocumentPage fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            int pageNumber = 0;
            Double width = null;
            Double height = null;
            List<ContentSpan> spans = null;
            Double angle = null;
            List<DocumentWord> words = null;
            List<DocumentLine> lines = null;
            List<DocumentBarcode> barcodes = null;
            List<DocumentFormula> formulas = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("pageNumber".equals(fieldName)) {
                    pageNumber = reader.getInt();
                } else if ("width".equals(fieldName)) {
                    width = reader.getNullable(JsonReader::getDouble);
                } else if ("height".equals(fieldName)) {
                    height = reader.getNullable(JsonReader::getDouble);
                } else if ("spans".equals(fieldName)) {
                    spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1));
                } else if ("angle".equals(fieldName)) {
                    angle = reader.getNullable(JsonReader::getDouble);
                } else if ("words".equals(fieldName)) {
                    words = reader.readArray(reader1 -> DocumentWord.fromJson(reader1));
                } else if ("lines".equals(fieldName)) {
                    lines = reader.readArray(reader1 -> DocumentLine.fromJson(reader1));
                } else if ("barcodes".equals(fieldName)) {
                    barcodes = reader.readArray(reader1 -> DocumentBarcode.fromJson(reader1));
                } else if ("formulas".equals(fieldName)) {
                    formulas = reader.readArray(reader1 -> DocumentFormula.fromJson(reader1));
                } else {
                    reader.skipChildren();
                }
            }
            DocumentPage deserializedDocumentPage = new DocumentPage(pageNumber);
            deserializedDocumentPage.width = width;
            deserializedDocumentPage.height = height;
            deserializedDocumentPage.spans = spans;
            deserializedDocumentPage.angle = angle;
            deserializedDocumentPage.words = words;
            deserializedDocumentPage.lines = lines;
            deserializedDocumentPage.barcodes = barcodes;
            deserializedDocumentPage.formulas = formulas;

            return deserializedDocumentPage;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * Paragraph in a document, generally consisting of a contiguous sequence of lines
 * with common alignment and spacing.
 */
@Immutable
public final class DocumentParagraph implements JsonSerializable<DocumentParagraph> {
    /*
     * Semantic role of the paragraph.
     */
    @Generated
    private SemanticRole role;

    /*
     * Paragraph text.
     */
    @Generated
    private final String content;

    /*
     * Encoded source that identifies the position of the paragraph in the content.
     */
    @Generated
    private String source;

    /*
     * Span of the paragraph in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /**
     * Creates an instance of DocumentParagraph class.
     *
     * @param content the content value to set.
     */
    @Generated
    private DocumentParagraph(String content) {
        this.content = content;
    }

    /**
     * Get the role property: Semantic role of the paragraph.
     *
     * @return the role value.
     */
    @Generated
    public SemanticRole getRole() {
        return this.role;
    }

    /**
     * Get the content property: Paragraph text.
     *
     * @return the content value.
     */
    @Generated
    public String getContent() {
        return this.content;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the paragraph in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Get the span property: Span of the paragraph in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField("content", this.content);
        jsonWriter.writeStringField("role", this.role == null ? null : this.role.toString());
        jsonWriter.writeStringField("source", this.source);
        jsonWriter.writeJsonField("span", this.span);
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentParagraph from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentParagraph if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentParagraph.
     */
    @Generated
    public static DocumentParagraph fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            String content = null;
            SemanticRole role = null;
            String source = null;
            ContentSpan span = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("content".equals(fieldName)) {
                    content = reader.getString();
                } else if ("role".equals(fieldName)) {
                    role = SemanticRole.fromString(reader.getString());
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else {
                    reader.skipChildren();
                }
            }
            DocumentParagraph deserializedDocumentParagraph = new DocumentParagraph(content);
            deserializedDocumentParagraph.role = role;
            deserializedDocumentParagraph.source = source;
            deserializedDocumentParagraph.span = span;

            return deserializedDocumentParagraph;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Section in a document.
 */
@Immutable
public final class DocumentSection implements JsonSerializable<DocumentSection> {
    /*
     * Span of the section in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /*
     * Child elements of the section.
     */
    @Generated
    private List<String> elements;

    /**
     * Creates an instance of DocumentSection class.
     */
    @Generated
    private DocumentSection() {
    }

    /**
     * Get the span property: Span of the section in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * Get the elements property: Child elements of the section.
     *
     * @return the elements value.
     */
    @Generated
    public List<String> getElements() {
        return this.elements;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeJsonField("span", this.span);
        jsonWriter.writeArrayField("elements", this.elements, (writer, element) -> writer.writeString(element));
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentSection from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentSection if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IOException If an error occurs while reading the DocumentSection.
     */
    @Generated
    public static DocumentSection fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            DocumentSection deserializedDocumentSection = new DocumentSection();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("span".equals(fieldName)) {
                    deserializedDocumentSection.span = ContentSpan.fromJson(reader);
                } else if ("elements".equals(fieldName)) {
                    List<String> elements = reader.readArray(reader1 -> reader1.getString());
                    deserializedDocumentSection.elements = elements;
                } else {
                    reader.skipChildren();
                }
            }

            return deserializedDocumentSection;
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.models;

import com.azure.core.annotation.Generated;
import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.List;

/**
 * Table in a document, consisting of table cells arranged in a rectangular layout.
 */
@Immutable
public final class DocumentTable implements JsonSerializable<DocumentTable> {
    /*
     * Number of rows in the table.
     */
    @Generated
    private final int rowCount;

    /*
     * Number of columns in the table.
     */
    @Generated
    private final int columnCount;

    /*
     * Cells contained within the table.
     */
    @Generated
    private final List<DocumentTableCell> cells;

    /*
     * Encoded source that identifies the position of the table in the content.
     */
    @Generated
    private String source;

    /*
     * Span of the table in the markdown content.
     */
    @Generated
    private ContentSpan span;

    /*
     * Table caption.
     */
    @Generated
    private DocumentCaption caption;

    /*
     * List of table footnotes.
     */
    @Generated
    private List<DocumentFootnote> footnotes;

    /*
     * Semantic role of the table.
     */
    @Generated
    private SemanticRole role;

    /**
     * Creates an instance of DocumentTable class.
     *
     * @param rowCount the rowCount value to set.
     * @param columnCount the columnCount value to set.
     * @param cells the cells value to set.
     */
    @Generated
    private DocumentTable(int rowCount, int columnCount, List<DocumentTableCell> cells) {
        this.rowCount = rowCount;
        this.columnCount = columnCount;
        this.cells = cells;
    }

    /**
     * Get the rowCount property: Number of rows in the table.
     *
     * @return the rowCount value.
     */
    @Generated
    public int getRowCount() {
        return this.rowCount;
    }

    /**
     * Get the columnCount property: Number of columns in the table.
     *
     * @return the columnCount value.
     */
    @Generated
    public int getColumnCount() {
        return this.columnCount;
    }

    /**
     * Get the cells property: Cells contained within the table.
     *
     * @return the cells value.
     */
    @Generated
    public List<DocumentTableCell> getCells() {
        return this.cells;
    }

    /**
     * Get the source property: Encoded source that identifies the position of the table in the content.
     *
     * @return the source value.
     */
    @Generated
    public String getSource() {
        return this.source;
    }

    /**
     * Get the span property: Span of the table in the markdown content.
     *
     * @return the span value.
     */
    @Generated
    public ContentSpan getSpan() {
        return this.span;
    }

    /**
     * Get the caption property: Table caption.
     *
     * @return the caption value.
     */
    @Generated
    public DocumentCaption getCaption() {
        return this.caption;
    }

    /**
     * Get the footnotes property: List of table footnotes.
     *
     * @return the footnotes value.
     */
    @Generated
    public List<DocumentFootnote> getFootnotes() {
        return this.footnotes;
    }

    /**
     * Get the role property: Semantic role of the table.
     *
     * @return the role value.
     */
    @Generated
    public SemanticRole getRole() {
        return this.role;
    }

    /**
     * {@inheritDoc}
     */
    @Generated
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeIntField("rowCount", this.rowCount);
        jsonWriter.writeIntField("columnCount", this.columnCount);
        jsonWriter.writeArrayField("cells", this.cells, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("source", this.source);
        jsonWriter.writeJsonField("span", this.span);
        jsonWriter.writeJsonField("caption", this.caption);
        jsonWriter.writeArrayField("footnotes", this.footnotes, (writer, element) -> writer.writeJson(element));
        jsonWriter.writeStringField("role", this.role == null ? null : this.role.toString());
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of DocumentTable from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of DocumentTable if the JsonReader was pointing to an instance of it, or null if it was
     * pointing to JSON null.
     * @throws IllegalStateException If the deserialized JSON object was missing any required properties.
     * @throws IOException If an error occurs while reading the DocumentTable.
     */
    @Generated
    public static DocumentTable fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            int rowCount = 0;
            int columnCount = 0;
            List<DocumentTableCell> cells = null;
            String source = null;
            ContentSpan span = null;
            DocumentCaption caption = null;
            List<DocumentFootnote> footnotes = null;
            SemanticRole role = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("rowCount".equals(fieldName)) {
                    rowCount = reader.getInt();
                } else if ("columnCount".equals(fieldName)) {
                    columnCount = reader.getInt();
                } else if ("cells".equals(fieldName)) {
                    cells = reader.readArray(reader1 -> DocumentTableCell.fromJson(reader1));
                } else if ("source".equals(fieldName)) {
                    source = reader.getString();
                } else if ("span".equals(fieldName)) {
                    span = ContentSpan.fromJson(reader);
                } else if ("caption".equals(fieldName)) {
                    caption = DocumentCaption.fromJson(reader);
                } else if ("footnotes".equals(fieldName)) {
                    footnotes = reader.readArray(reader1 -> DocumentFootnote.fromJson(reader1));
                } else if ("role".equals(fieldName)) {
                    role = SemanticRole.fromString(reader.getString());
                } else {
                    reader.skipChildren();
                }
            }
            DocumentTable deserializedDocumentTable = new DocumentTable(rowCount, columnCount, cells);
            deserializedDocumentTable.source = source;
            deserializedDocumentTable.span = span;
            deserializedDocumentTable.caption = caption;
            deserializedDocumentTable.footnotes = footnotes;
            deserializedDocumentTable.role = role;

            return deserializedDocumentTable;
        });
    }
}
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCell.java @@ -0,0 +1,256 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Table cell in a document table. + */ +@Immutable +public final class DocumentTableCell implements JsonSerializable { + /* + * Table cell kind. + */ + @Generated + private DocumentTableCellKind kind; + + /* + * Row index of the cell. + */ + @Generated + private final int rowIndex; + + /* + * Column index of the cell. + */ + @Generated + private final int columnIndex; + + /* + * Number of rows spanned by this cell. + */ + @Generated + private Integer rowSpan; + + /* + * Number of columns spanned by this cell. + */ + @Generated + private Integer columnSpan; + + /* + * Content of the table cell. + */ + @Generated + private final String content; + + /* + * Encoded source that identifies the position of the table cell in the content. + */ + @Generated + private String source; + + /* + * Span of the table cell in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Child elements of the table cell. + */ + @Generated + private List elements; + + /** + * Creates an instance of DocumentTableCell class. + * + * @param rowIndex the rowIndex value to set. + * @param columnIndex the columnIndex value to set. + * @param content the content value to set. 
+ */ + @Generated + private DocumentTableCell(int rowIndex, int columnIndex, String content) { + this.rowIndex = rowIndex; + this.columnIndex = columnIndex; + this.content = content; + } + + /** + * Get the kind property: Table cell kind. + * + * @return the kind value. + */ + @Generated + public DocumentTableCellKind getKind() { + return this.kind; + } + + /** + * Get the rowIndex property: Row index of the cell. + * + * @return the rowIndex value. + */ + @Generated + public int getRowIndex() { + return this.rowIndex; + } + + /** + * Get the columnIndex property: Column index of the cell. + * + * @return the columnIndex value. + */ + @Generated + public int getColumnIndex() { + return this.columnIndex; + } + + /** + * Get the rowSpan property: Number of rows spanned by this cell. + * + * @return the rowSpan value. + */ + @Generated + public Integer getRowSpan() { + return this.rowSpan; + } + + /** + * Get the columnSpan property: Number of columns spanned by this cell. + * + * @return the columnSpan value. + */ + @Generated + public Integer getColumnSpan() { + return this.columnSpan; + } + + /** + * Get the content property: Content of the table cell. + * + * @return the content value. + */ + @Generated + public String getContent() { + return this.content; + } + + /** + * Get the source property: Encoded source that identifies the position of the table cell in the content. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the span property: Span of the table cell in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the elements property: Child elements of the table cell. + * + * @return the elements value. 
+ */ + @Generated + public List getElements() { + return this.elements; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeIntField("rowIndex", this.rowIndex); + jsonWriter.writeIntField("columnIndex", this.columnIndex); + jsonWriter.writeStringField("content", this.content); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + jsonWriter.writeNumberField("rowSpan", this.rowSpan); + jsonWriter.writeNumberField("columnSpan", this.columnSpan); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeArrayField("elements", this.elements, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentTableCell from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentTableCell if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentTableCell. 
+ */ + @Generated + public static DocumentTableCell fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + int rowIndex = 0; + int columnIndex = 0; + String content = null; + DocumentTableCellKind kind = null; + Integer rowSpan = null; + Integer columnSpan = null; + String source = null; + ContentSpan span = null; + List elements = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("rowIndex".equals(fieldName)) { + rowIndex = reader.getInt(); + } else if ("columnIndex".equals(fieldName)) { + columnIndex = reader.getInt(); + } else if ("content".equals(fieldName)) { + content = reader.getString(); + } else if ("kind".equals(fieldName)) { + kind = DocumentTableCellKind.fromString(reader.getString()); + } else if ("rowSpan".equals(fieldName)) { + rowSpan = reader.getNullable(JsonReader::getInt); + } else if ("columnSpan".equals(fieldName)) { + columnSpan = reader.getNullable(JsonReader::getInt); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("elements".equals(fieldName)) { + elements = reader.readArray(reader1 -> reader1.getString()); + } else { + reader.skipChildren(); + } + } + DocumentTableCell deserializedDocumentTableCell = new DocumentTableCell(rowIndex, columnIndex, content); + deserializedDocumentTableCell.kind = kind; + deserializedDocumentTableCell.rowSpan = rowSpan; + deserializedDocumentTableCell.columnSpan = columnSpan; + deserializedDocumentTableCell.source = source; + deserializedDocumentTableCell.span = span; + deserializedDocumentTableCell.elements = elements; + + return deserializedDocumentTableCell; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCellKind.java 
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCellKind.java new file mode 100644 index 000000000000..dd57b886adce --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCellKind.java @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Table cell kind. + */ +public final class DocumentTableCellKind extends ExpandableStringEnum { + /** + * Main content/data. + */ + @Generated + public static final DocumentTableCellKind CONTENT = fromString("content"); + + /** + * Description of the row content. + */ + @Generated + public static final DocumentTableCellKind ROW_HEADER = fromString("rowHeader"); + + /** + * Description the column content. + */ + @Generated + public static final DocumentTableCellKind COLUMN_HEADER = fromString("columnHeader"); + + /** + * Description of the row headers, usually located at the top left corner of a table. + */ + @Generated + public static final DocumentTableCellKind STUB_HEAD = fromString("stubHead"); + + /** + * Description of the content in (parts of) the table. + */ + @Generated + public static final DocumentTableCellKind DESCRIPTION = fromString("description"); + + /** + * Creates a new instance of DocumentTableCellKind value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public DocumentTableCellKind() { + } + + /** + * Creates or finds a DocumentTableCellKind from its string representation. + * + * @param name a name to look for. + * @return the corresponding DocumentTableCellKind. 
+ */ + @Generated + public static DocumentTableCellKind fromString(String name) { + return fromString(name, DocumentTableCellKind.class); + } + + /** + * Gets known DocumentTableCellKind values. + * + * @return known DocumentTableCellKind values. + */ + @Generated + public static Collection values() { + return values(DocumentTableCellKind.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentWord.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentWord.java new file mode 100644 index 000000000000..4c898fcb5803 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/DocumentWord.java @@ -0,0 +1,150 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Word in a document, consisting of a contiguous sequence of characters. + * For non-space delimited languages, such as Chinese, Japanese, and Korean, + * each character is represented as its own word. + */ +@Immutable +public final class DocumentWord implements JsonSerializable { + /* + * Word text. + */ + @Generated + private final String content; + + /* + * Encoded source that identifies the position of the word in the content. + */ + @Generated + private String source; + + /* + * Span of the word in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * Confidence of predicting the word. 
+ */ + @Generated + private Double confidence; + + /** + * Creates an instance of DocumentWord class. + * + * @param content the content value to set. + */ + @Generated + private DocumentWord(String content) { + this.content = content; + } + + /** + * Get the content property: Word text. + * + * @return the content value. + */ + @Generated + public String getContent() { + return this.content; + } + + /** + * Get the source property: Encoded source that identifies the position of the word in the content. + * + * @return the source value. + */ + @Generated + public String getSource() { + return this.source; + } + + /** + * Get the span property: Span of the word in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the confidence property: Confidence of predicting the word. + * + * @return the confidence value. + */ + @Generated + public Double getConfidence() { + return this.confidence; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("content", this.content); + jsonWriter.writeStringField("source", this.source); + jsonWriter.writeJsonField("span", this.span); + jsonWriter.writeNumberField("confidence", this.confidence); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DocumentWord from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DocumentWord if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DocumentWord. 
+ */ + @Generated + public static DocumentWord fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String content = null; + String source = null; + ContentSpan span = null; + Double confidence = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("content".equals(fieldName)) { + content = reader.getString(); + } else if ("source".equals(fieldName)) { + source = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else if ("confidence".equals(fieldName)) { + confidence = reader.getNullable(JsonReader::getDouble); + } else { + reader.skipChildren(); + } + } + DocumentWord deserializedDocumentWord = new DocumentWord(content); + deserializedDocumentWord.source = source; + deserializedDocumentWord.span = span; + deserializedDocumentWord.confidence = confidence; + + return deserializedDocumentWord; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/GenerationMethod.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/GenerationMethod.java new file mode 100644 index 000000000000..f5b5fd08c53b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/GenerationMethod.java @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Generation method. 
+ */ +public final class GenerationMethod extends ExpandableStringEnum { + /** + * Values are generated freely based on the content. + */ + @Generated + public static final GenerationMethod GENERATE = fromString("generate"); + + /** + * Values are extracted as they appear in the content. + */ + @Generated + public static final GenerationMethod EXTRACT = fromString("extract"); + + /** + * Values are classified against a predefined set of categories. + */ + @Generated + public static final GenerationMethod CLASSIFY = fromString("classify"); + + /** + * Creates a new instance of GenerationMethod value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public GenerationMethod() { + } + + /** + * Creates or finds a GenerationMethod from its string representation. + * + * @param name a name to look for. + * @return the corresponding GenerationMethod. + */ + @Generated + public static GenerationMethod fromString(String name) { + return fromString(name, GenerationMethod.class); + } + + /** + * Gets known GenerationMethod values. + * + * @return known GenerationMethod values. + */ + @Generated + public static Collection values() { + return values(GenerationMethod.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/IntegerField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/IntegerField.java new file mode 100644 index 000000000000..2625923c42a5 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/IntegerField.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Integer field extracted from the content. + */ +@Immutable +public final class IntegerField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.INTEGER; + + /* + * Integer field value. + */ + @Generated + private Long valueInteger; + + /** + * Creates an instance of IntegerField class. + */ + @Generated + private IntegerField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueInteger property: Integer field value. + * + * @return the valueInteger value. + */ + @Generated + public Long getValueInteger() { + return this.valueInteger; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeNumberField("valueInteger", this.valueInteger); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of IntegerField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of IntegerField if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. 
+ * @throws IOException If an error occurs while reading the IntegerField. + */ + @Generated + public static IntegerField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + IntegerField deserializedIntegerField = new IntegerField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedIntegerField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedIntegerField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedIntegerField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedIntegerField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueInteger".equals(fieldName)) { + deserializedIntegerField.valueInteger = reader.getNullable(JsonReader::getLong); + } else { + reader.skipChildren(); + } + } + + return deserializedIntegerField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/JsonField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/JsonField.java new file mode 100644 index 000000000000..df5617a1d2c9 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/JsonField.java @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.BinaryData; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * JSON field extracted from the content. + */ +@Immutable +public final class JsonField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.JSON; + + /* + * JSON field value. + */ + @Generated + private BinaryData valueJson; + + /** + * Creates an instance of JsonField class. + */ + @Generated + private JsonField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueJson property: JSON field value. + * + * @return the valueJson value. + */ + @Generated + public BinaryData getValueJson() { + return this.valueJson; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + if (this.valueJson != null) { + jsonWriter.writeFieldName("valueJson"); + this.valueJson.writeTo(jsonWriter); + } + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of JsonField from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of JsonField if the JsonReader was pointing to an instance of it, or null if it was pointing + * to JSON null. + * @throws IOException If an error occurs while reading the JsonField. + */ + @Generated + public static JsonField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + JsonField deserializedJsonField = new JsonField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedJsonField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedJsonField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedJsonField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedJsonField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueJson".equals(fieldName)) { + deserializedJsonField.valueJson + = reader.getNullable(nonNullReader -> BinaryData.fromObject(nonNullReader.readUntyped())); + } else { + reader.skipChildren(); + } + } + + return deserializedJsonField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSource.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSource.java new file mode 100644 index 000000000000..610d83807240 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSource.java @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +/** + * Knowledge source. + */ +@Immutable +public class KnowledgeSource implements JsonSerializable { + /* + * The kind of knowledge source. + */ + @Generated + private KnowledgeSourceKind kind = KnowledgeSourceKind.fromString("KnowledgeSource"); + + /** + * Stores updated model property, the value is property name, not serialized name. + */ + @Generated + private final Set updatedProperties = new HashSet<>(); + + @Generated + private boolean jsonMergePatch; + + @Generated + private void serializeAsJsonMergePatch(boolean jsonMergePatch) { + this.jsonMergePatch = jsonMergePatch; + } + + static { + JsonMergePatchHelper.setKnowledgeSourceAccessor(new JsonMergePatchHelper.KnowledgeSourceAccessor() { + @Override + public KnowledgeSource prepareModelForJsonMergePatch(KnowledgeSource model, boolean jsonMergePatchEnabled) { + model.serializeAsJsonMergePatch(jsonMergePatchEnabled); + return model; + } + + @Override + public boolean isJsonMergePatch(KnowledgeSource model) { + return model.jsonMergePatch; + } + }); + } + + /** + * Creates an instance of KnowledgeSource class. + */ + @Generated + public KnowledgeSource() { + } + + /** + * Get the kind property: The kind of knowledge source. + * + * @return the kind value. 
+ */ + @Generated + public KnowledgeSourceKind getKind() { + return this.kind; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + if (jsonMergePatch) { + return toJsonMergePatch(jsonWriter); + } else { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + return jsonWriter.writeEndObject(); + } + } + + @Generated + private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("kind", this.kind.toString()); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of KnowledgeSource from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of KnowledgeSource if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the KnowledgeSource. + */ + @Generated + public static KnowledgeSource fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String discriminatorValue = null; + try (JsonReader readerToUse = reader.bufferObject()) { + readerToUse.nextToken(); // Prepare for reading + while (readerToUse.nextToken() != JsonToken.END_OBJECT) { + String fieldName = readerToUse.getFieldName(); + readerToUse.nextToken(); + if ("kind".equals(fieldName)) { + discriminatorValue = readerToUse.getString(); + break; + } else { + readerToUse.skipChildren(); + } + } + // Use the discriminator value to determine which subtype should be deserialized. 
+ if ("labeledData".equals(discriminatorValue)) { + return LabeledDataKnowledgeSource.fromJson(readerToUse.reset()); + } else { + return fromJsonKnownDiscriminator(readerToUse.reset()); + } + } + }); + } + + @Generated + static KnowledgeSource fromJsonKnownDiscriminator(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + KnowledgeSource deserializedKnowledgeSource = new KnowledgeSource(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("kind".equals(fieldName)) { + deserializedKnowledgeSource.kind = KnowledgeSourceKind.fromString(reader.getString()); + } else { + reader.skipChildren(); + } + } + + return deserializedKnowledgeSource; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSourceKind.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSourceKind.java new file mode 100644 index 000000000000..958100d7378d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSourceKind.java @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Knowledge source kind. + */ +public final class KnowledgeSourceKind extends ExpandableStringEnum { + /** + * A labeled data knowledge source. + */ + @Generated + public static final KnowledgeSourceKind LABELED_DATA = fromString("labeledData"); + + /** + * Creates a new instance of KnowledgeSourceKind value. 
+ * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public KnowledgeSourceKind() { + } + + /** + * Creates or finds a KnowledgeSourceKind from its string representation. + * + * @param name a name to look for. + * @return the corresponding KnowledgeSourceKind. + */ + @Generated + public static KnowledgeSourceKind fromString(String name) { + return fromString(name, KnowledgeSourceKind.class); + } + + /** + * Gets known KnowledgeSourceKind values. + * + * @return known KnowledgeSourceKind values. + */ + @Generated + public static Collection values() { + return values(KnowledgeSourceKind.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LabeledDataKnowledgeSource.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LabeledDataKnowledgeSource.java new file mode 100644 index 000000000000..cbdb594661dc --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LabeledDataKnowledgeSource.java @@ -0,0 +1,219 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.ai.contentunderstanding.implementation.JsonMergePatchHelper; +import com.azure.core.annotation.Fluent; +import com.azure.core.annotation.Generated; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +/** + * Labeled data knowledge source. + */ +@Fluent +public final class LabeledDataKnowledgeSource extends KnowledgeSource { + /* + * The kind of knowledge source. 
+ */ + @Generated + private KnowledgeSourceKind kind = KnowledgeSourceKind.LABELED_DATA; + + /* + * The URL of the blob container containing labeled data. + */ + @Generated + private String containerUrl; + + /* + * An optional prefix to filter blobs within the container. + */ + @Generated + private String prefix; + + /* + * An optional path to a file listing specific blobs to include. + */ + @Generated + private String fileListPath; + + /** + * Stores updated model property, the value is property name, not serialized name. + */ + @Generated + private final Set updatedProperties = new HashSet<>(); + + /** + * Creates an instance of LabeledDataKnowledgeSource class. + */ + @Generated + public LabeledDataKnowledgeSource() { + } + + /** + * Get the kind property: The kind of knowledge source. + * + * @return the kind value. + */ + @Generated + @Override + public KnowledgeSourceKind getKind() { + return this.kind; + } + + /** + * Get the containerUrl property: The URL of the blob container containing labeled data. + * + * @return the containerUrl value. + */ + @Generated + public String getContainerUrl() { + return this.containerUrl; + } + + /** + * Set the containerUrl property: The URL of the blob container containing labeled data. + *

Required when create the resource.

+ * + * @param containerUrl the containerUrl value to set. + * @return the LabeledDataKnowledgeSource object itself. + */ + @Generated + public LabeledDataKnowledgeSource setContainerUrl(String containerUrl) { + this.containerUrl = containerUrl; + this.updatedProperties.add("containerUrl"); + return this; + } + + /** + * Get the prefix property: An optional prefix to filter blobs within the container. + * + * @return the prefix value. + */ + @Generated + public String getPrefix() { + return this.prefix; + } + + /** + * Set the prefix property: An optional prefix to filter blobs within the container. + * + * @param prefix the prefix value to set. + * @return the LabeledDataKnowledgeSource object itself. + */ + @Generated + public LabeledDataKnowledgeSource setPrefix(String prefix) { + this.prefix = prefix; + this.updatedProperties.add("prefix"); + return this; + } + + /** + * Get the fileListPath property: An optional path to a file listing specific blobs to include. + * + * @return the fileListPath value. + */ + @Generated + public String getFileListPath() { + return this.fileListPath; + } + + /** + * Set the fileListPath property: An optional path to a file listing specific blobs to include. + *

Required when create the resource.

+ * + * @param fileListPath the fileListPath value to set. + * @return the LabeledDataKnowledgeSource object itself. + */ + @Generated + public LabeledDataKnowledgeSource setFileListPath(String fileListPath) { + this.fileListPath = fileListPath; + this.updatedProperties.add("fileListPath"); + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + if (JsonMergePatchHelper.getKnowledgeSourceAccessor().isJsonMergePatch(this)) { + return toJsonMergePatch(jsonWriter); + } else { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + jsonWriter.writeStringField("containerUrl", this.containerUrl); + jsonWriter.writeStringField("prefix", this.prefix); + jsonWriter.writeStringField("fileListPath", this.fileListPath); + return jsonWriter.writeEndObject(); + } + } + + @Generated + private JsonWriter toJsonMergePatch(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("kind", this.kind.toString()); + if (updatedProperties.contains("containerUrl")) { + if (this.containerUrl == null) { + jsonWriter.writeNullField("containerUrl"); + } else { + jsonWriter.writeStringField("containerUrl", this.containerUrl); + } + } + if (updatedProperties.contains("prefix")) { + if (this.prefix == null) { + jsonWriter.writeNullField("prefix"); + } else { + jsonWriter.writeStringField("prefix", this.prefix); + } + } + if (updatedProperties.contains("fileListPath")) { + if (this.fileListPath == null) { + jsonWriter.writeNullField("fileListPath"); + } else { + jsonWriter.writeStringField("fileListPath", this.fileListPath); + } + } + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of LabeledDataKnowledgeSource from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of LabeledDataKnowledgeSource if the JsonReader was pointing to an instance of it, or null if + * it was pointing to JSON null. + * @throws IOException If an error occurs while reading the LabeledDataKnowledgeSource. + */ + @Generated + public static LabeledDataKnowledgeSource fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + LabeledDataKnowledgeSource deserializedLabeledDataKnowledgeSource = new LabeledDataKnowledgeSource(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("kind".equals(fieldName)) { + deserializedLabeledDataKnowledgeSource.kind = KnowledgeSourceKind.fromString(reader.getString()); + } else if ("containerUrl".equals(fieldName)) { + deserializedLabeledDataKnowledgeSource.containerUrl = reader.getString(); + } else if ("prefix".equals(fieldName)) { + deserializedLabeledDataKnowledgeSource.prefix = reader.getString(); + } else if ("fileListPath".equals(fieldName)) { + deserializedLabeledDataKnowledgeSource.fileListPath = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedLabeledDataKnowledgeSource; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LengthUnit.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LengthUnit.java new file mode 100644 index 000000000000..05a038ad29aa --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/LengthUnit.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Length unit used by the width, height, and source properties. + */ +public final class LengthUnit extends ExpandableStringEnum { + /** + * Pixel unit. + */ + @Generated + public static final LengthUnit PIXEL = fromString("pixel"); + + /** + * Inch unit. + */ + @Generated + public static final LengthUnit INCH = fromString("inch"); + + /** + * Creates a new instance of LengthUnit value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public LengthUnit() { + } + + /** + * Creates or finds a LengthUnit from its string representation. + * + * @param name a name to look for. + * @return the corresponding LengthUnit. + */ + @Generated + public static LengthUnit fromString(String name) { + return fromString(name, LengthUnit.class); + } + + /** + * Gets known LengthUnit values. + * + * @return known LengthUnit values. + */ + @Generated + public static Collection values() { + return values(LengthUnit.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContent.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContent.java new file mode 100644 index 000000000000..7007846f357c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContent.java @@ -0,0 +1,300 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.Map; + +/** + * Media content base class. + */ +@Immutable +public class MediaContent implements JsonSerializable { + /* + * Content kind. + */ + @Generated + private MediaContentKind kind = MediaContentKind.fromString("MediaContent"); + + /* + * Detected MIME type of the content. Ex. application/pdf, image/jpeg, etc. + */ + @Generated + private final String mimeType; + + /* + * The analyzer that generated this content. + */ + @Generated + private String analyzerId; + + /* + * Classified content category. + */ + @Generated + private String category; + + /* + * The path of the content in the input. + */ + @Generated + private String path; + + /* + * Markdown representation of the content. + */ + @Generated + private String markdown; + + /* + * Extracted fields from the content. + */ + @Generated + private Map fields; + + /** + * Creates an instance of MediaContent class. + * + * @param mimeType the mimeType value to set. + */ + @Generated + protected MediaContent(String mimeType) { + this.mimeType = mimeType; + } + + /** + * Get the kind property: Content kind. + * + * @return the kind value. + */ + @Generated + public MediaContentKind getKind() { + return this.kind; + } + + /** + * Get the mimeType property: Detected MIME type of the content. Ex. application/pdf, image/jpeg, etc. + * + * @return the mimeType value. + */ + @Generated + public String getMimeType() { + return this.mimeType; + } + + /** + * Get the analyzerId property: The analyzer that generated this content. + * + * @return the analyzerId value. 
+ */ + @Generated + public String getAnalyzerId() { + return this.analyzerId; + } + + /** + * Set the analyzerId property: The analyzer that generated this content. + * + * @param analyzerId the analyzerId value to set. + * @return the MediaContent object itself. + */ + @Generated + MediaContent setAnalyzerId(String analyzerId) { + this.analyzerId = analyzerId; + return this; + } + + /** + * Get the category property: Classified content category. + * + * @return the category value. + */ + @Generated + public String getCategory() { + return this.category; + } + + /** + * Set the category property: Classified content category. + * + * @param category the category value to set. + * @return the MediaContent object itself. + */ + @Generated + MediaContent setCategory(String category) { + this.category = category; + return this; + } + + /** + * Get the path property: The path of the content in the input. + * + * @return the path value. + */ + @Generated + public String getPath() { + return this.path; + } + + /** + * Set the path property: The path of the content in the input. + * + * @param path the path value to set. + * @return the MediaContent object itself. + */ + @Generated + MediaContent setPath(String path) { + this.path = path; + return this; + } + + /** + * Get the markdown property: Markdown representation of the content. + * + * @return the markdown value. + */ + @Generated + public String getMarkdown() { + return this.markdown; + } + + /** + * Set the markdown property: Markdown representation of the content. + * + * @param markdown the markdown value to set. + * @return the MediaContent object itself. + */ + @Generated + MediaContent setMarkdown(String markdown) { + this.markdown = markdown; + return this; + } + + /** + * Get the fields property: Extracted fields from the content. + * + * @return the fields value. + */ + @Generated + public Map getFields() { + return this.fields; + } + + /** + * Set the fields property: Extracted fields from the content. 
+ * + * @param fields the fields value to set. + * @return the MediaContent object itself. + */ + @Generated + MediaContent setFields(Map fields) { + this.fields = fields; + return this; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("mimeType", this.mimeType); + jsonWriter.writeStringField("kind", this.kind == null ? null : this.kind.toString()); + jsonWriter.writeStringField("analyzerId", this.analyzerId); + jsonWriter.writeStringField("category", this.category); + jsonWriter.writeStringField("path", this.path); + jsonWriter.writeStringField("markdown", this.markdown); + jsonWriter.writeMapField("fields", this.fields, (writer, element) -> writer.writeJson(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of MediaContent from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of MediaContent if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the MediaContent. + */ + @Generated + public static MediaContent fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String discriminatorValue = null; + try (JsonReader readerToUse = reader.bufferObject()) { + readerToUse.nextToken(); // Prepare for reading + while (readerToUse.nextToken() != JsonToken.END_OBJECT) { + String fieldName = readerToUse.getFieldName(); + readerToUse.nextToken(); + if ("kind".equals(fieldName)) { + discriminatorValue = readerToUse.getString(); + break; + } else { + readerToUse.skipChildren(); + } + } + // Use the discriminator value to determine which subtype should be deserialized. 
+ if ("document".equals(discriminatorValue)) { + return DocumentContent.fromJson(readerToUse.reset()); + } else if ("audioVisual".equals(discriminatorValue)) { + return AudioVisualContent.fromJson(readerToUse.reset()); + } else { + return fromJsonKnownDiscriminator(readerToUse.reset()); + } + } + }); + } + + @Generated + static MediaContent fromJsonKnownDiscriminator(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String mimeType = null; + MediaContentKind kind = null; + String analyzerId = null; + String category = null; + String path = null; + String markdown = null; + Map fields = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("mimeType".equals(fieldName)) { + mimeType = reader.getString(); + } else if ("kind".equals(fieldName)) { + kind = MediaContentKind.fromString(reader.getString()); + } else if ("analyzerId".equals(fieldName)) { + analyzerId = reader.getString(); + } else if ("category".equals(fieldName)) { + category = reader.getString(); + } else if ("path".equals(fieldName)) { + path = reader.getString(); + } else if ("markdown".equals(fieldName)) { + markdown = reader.getString(); + } else if ("fields".equals(fieldName)) { + fields = reader.readMap(reader1 -> ContentField.fromJson(reader1)); + } else { + reader.skipChildren(); + } + } + MediaContent deserializedMediaContent = new MediaContent(mimeType); + deserializedMediaContent.kind = kind; + deserializedMediaContent.analyzerId = analyzerId; + deserializedMediaContent.category = category; + deserializedMediaContent.path = path; + deserializedMediaContent.markdown = markdown; + deserializedMediaContent.fields = fields; + + return deserializedMediaContent; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContentKind.java 
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContentKind.java new file mode 100644 index 000000000000..dd187ea42c4c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/MediaContentKind.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Kind of media content. + */ +public final class MediaContentKind extends ExpandableStringEnum { + /** + * Document content, such as pdf, image, txt, etc. + */ + @Generated + public static final MediaContentKind DOCUMENT = fromString("document"); + + /** + * Audio visual content, such as mp3, mp4, etc. + */ + @Generated + public static final MediaContentKind AUDIO_VISUAL = fromString("audioVisual"); + + /** + * Creates a new instance of MediaContentKind value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public MediaContentKind() { + } + + /** + * Creates or finds a MediaContentKind from its string representation. + * + * @param name a name to look for. + * @return the corresponding MediaContentKind. + */ + @Generated + public static MediaContentKind fromString(String name) { + return fromString(name, MediaContentKind.class); + } + + /** + * Gets known MediaContentKind values. + * + * @return known MediaContentKind values. 
+ */ + @Generated + public static Collection values() { + return values(MediaContentKind.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/NumberField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/NumberField.java new file mode 100644 index 000000000000..58fe74c5dc15 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/NumberField.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Number field extracted from the content. + */ +@Immutable +public final class NumberField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.NUMBER; + + /* + * Number field value. + */ + @Generated + private Double valueNumber; + + /** + * Creates an instance of NumberField class. + */ + @Generated + private NumberField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueNumber property: Number field value. + * + * @return the valueNumber value. 
+ */ + @Generated + public Double getValueNumber() { + return this.valueNumber; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeNumberField("valueNumber", this.valueNumber); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of NumberField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of NumberField if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the NumberField. + */ + @Generated + public static NumberField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + NumberField deserializedNumberField = new NumberField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedNumberField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedNumberField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedNumberField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedNumberField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueNumber".equals(fieldName)) { + deserializedNumberField.valueNumber = reader.getNullable(JsonReader::getDouble); + } else { + reader.skipChildren(); + } + } + + return 
deserializedNumberField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ObjectField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ObjectField.java new file mode 100644 index 000000000000..bc8d45246a6c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ObjectField.java @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.core.util.logging.ClientLogger; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; + +/** + * Object field extracted from the content. + */ +@Immutable +public final class ObjectField extends ContentField { + + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.OBJECT; + + /* + * Object field value. + */ + @Generated + private Map valueObject; + + /** + * Creates an instance of ObjectField class. + */ + @Generated + private ObjectField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueObject property: Object field value. + * + * @return the valueObject value. 
+ */ + @Generated + public Map getValueObject() { + return this.valueObject; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeMapField("valueObject", this.valueObject, (writer, element) -> writer.writeJson(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ObjectField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ObjectField if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the ObjectField. 
+ */ + @Generated + public static ObjectField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ObjectField deserializedObjectField = new ObjectField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedObjectField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedObjectField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedObjectField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedObjectField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueObject".equals(fieldName)) { + Map valueObject = reader.readMap(reader1 -> ContentField.fromJson(reader1)); + deserializedObjectField.valueObject = valueObject; + } else { + reader.skipChildren(); + } + } + return deserializedObjectField; + }); + } + + private static final ClientLogger LOGGER = new ClientLogger(ObjectField.class); + + /** + * Gets a field from the object by name. + * + * @param fieldName The name of the field to retrieve. + * @return The field if found. + * @throws IllegalArgumentException if fieldName is null or empty. + * @throws NoSuchElementException if the field is not found. 
+ */ + public ContentField getField(String fieldName) { + if (fieldName == null || fieldName.isEmpty()) { + throw LOGGER.logThrowableAsError(new IllegalArgumentException("fieldName cannot be null or empty.")); + } + if (getValueObject() != null && getValueObject().containsKey(fieldName)) { + return getValueObject().get(fieldName); + } + throw LOGGER.logThrowableAsError( + new java.util.NoSuchElementException("Field '" + fieldName + "' was not found in the object.")); + } + + /** + * Gets a field from the object by name, or null if the field does not exist. + * + * @param fieldName The name of the field to retrieve. + * @return The field if found, or null if not found. + */ + public ContentField getFieldOrDefault(String fieldName) { + if (fieldName == null || fieldName.isEmpty() || getValueObject() == null) { + return null; + } + return getValueObject().get(fieldName); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/OperationState.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/OperationState.java new file mode 100644 index 000000000000..0414efda0199 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/OperationState.java @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Enum describing allowed operation states. + */ +public final class OperationState extends ExpandableStringEnum { + /** + * The operation has not started. 
+ */ + @Generated + public static final OperationState NOT_STARTED = fromString("NotStarted"); + + /** + * The operation is in progress. + */ + @Generated + public static final OperationState RUNNING = fromString("Running"); + + /** + * The operation has completed successfully. + */ + @Generated + public static final OperationState SUCCEEDED = fromString("Succeeded"); + + /** + * The operation has failed. + */ + @Generated + public static final OperationState FAILED = fromString("Failed"); + + /** + * The operation has been canceled by the user. + */ + @Generated + public static final OperationState CANCELED = fromString("Canceled"); + + /** + * Creates a new instance of OperationState value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public OperationState() { + } + + /** + * Creates or finds a OperationState from its string representation. + * + * @param name a name to look for. + * @return the corresponding OperationState. + */ + @Generated + public static OperationState fromString(String name) { + return fromString(name, OperationState.class); + } + + /** + * Gets known OperationState values. + * + * @return known OperationState values. + */ + @Generated + public static Collection values() { + return values(OperationState.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ProcessingLocation.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ProcessingLocation.java new file mode 100644 index 000000000000..9d47e79cf638 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/ProcessingLocation.java @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * The location where the data may be processed. + */ +public final class ProcessingLocation extends ExpandableStringEnum { + /** + * Data may be processed in the same geography as the resource. + */ + @Generated + public static final ProcessingLocation GEOGRAPHY = fromString("geography"); + + /** + * Data may be processed in the same data zone as the resource. + */ + @Generated + public static final ProcessingLocation DATA_ZONE = fromString("dataZone"); + + /** + * Data may be processed in any Azure data center globally. + */ + @Generated + public static final ProcessingLocation GLOBAL = fromString("global"); + + /** + * Creates a new instance of ProcessingLocation value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public ProcessingLocation() { + } + + /** + * Creates or finds a ProcessingLocation from its string representation. + * + * @param name a name to look for. + * @return the corresponding ProcessingLocation. + */ + @Generated + public static ProcessingLocation fromString(String name) { + return fromString(name, ProcessingLocation.class); + } + + /** + * Gets known ProcessingLocation values. + * + * @return known ProcessingLocation values. 
+ */ + @Generated + public static Collection values() { + return values(ProcessingLocation.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SemanticRole.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SemanticRole.java new file mode 100644 index 000000000000..1673500229ea --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SemanticRole.java @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Semantic role of the paragraph. + */ +public final class SemanticRole extends ExpandableStringEnum { + /** + * Text near the top edge of the page. + */ + @Generated + public static final SemanticRole PAGE_HEADER = fromString("pageHeader"); + + /** + * Text near the bottom edge of the page. + */ + @Generated + public static final SemanticRole PAGE_FOOTER = fromString("pageFooter"); + + /** + * Page number. + */ + @Generated + public static final SemanticRole PAGE_NUMBER = fromString("pageNumber"); + + /** + * Top-level title describing the entire document. + */ + @Generated + public static final SemanticRole TITLE = fromString("title"); + + /** + * Sub heading describing a section of the document. + */ + @Generated + public static final SemanticRole SECTION_HEADING = fromString("sectionHeading"); + + /** + * Note usually placed after the main content on a page. + */ + @Generated + public static final SemanticRole FOOTNOTE = fromString("footnote"); + + /** + * Block of formulas, often with shared alignment. 
+ */ + @Generated + public static final SemanticRole FORMULA_BLOCK = fromString("formulaBlock"); + + /** + * Creates a new instance of SemanticRole value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public SemanticRole() { + } + + /** + * Creates or finds a SemanticRole from its string representation. + * + * @param name a name to look for. + * @return the corresponding SemanticRole. + */ + @Generated + public static SemanticRole fromString(String name) { + return fromString(name, SemanticRole.class); + } + + /** + * Gets known SemanticRole values. + * + * @return known SemanticRole values. + */ + @Generated + public static Collection values() { + return values(SemanticRole.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/StringField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/StringField.java new file mode 100644 index 000000000000..e7c149a7cbe5 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/StringField.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * String field extracted from the content. + */ +@Immutable +public final class StringField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.STRING; + + /* + * String field value. 
+ */ + @Generated + private String valueString; + + /** + * Creates an instance of StringField class. + */ + @Generated + private StringField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueString property: String field value. + * + * @return the valueString value. + */ + @Generated + public String getValueString() { + return this.valueString; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeStringField("valueString", this.valueString); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of StringField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of StringField if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the StringField. 
+ */ + @Generated + public static StringField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + StringField deserializedStringField = new StringField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedStringField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedStringField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedStringField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedStringField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueString".equals(fieldName)) { + deserializedStringField.valueString = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedStringField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SupportedModels.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SupportedModels.java new file mode 100644 index 000000000000..29c99af56b60 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/SupportedModels.java @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Chat completion and embedding models supported by the analyzer. + */ +@Immutable +public final class SupportedModels implements JsonSerializable { + /* + * Chat completion models supported by the analyzer. + */ + @Generated + private List completion; + + /* + * Embedding models supported by the analyzer. + */ + @Generated + private List embedding; + + /** + * Creates an instance of SupportedModels class. + */ + @Generated + private SupportedModels() { + } + + /** + * Get the completion property: Chat completion models supported by the analyzer. + * + * @return the completion value. + */ + @Generated + public List getCompletion() { + return this.completion; + } + + /** + * Get the embedding property: Embedding models supported by the analyzer. + * + * @return the embedding value. + */ + @Generated + public List getEmbedding() { + return this.embedding; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("completion", this.completion, (writer, element) -> writer.writeString(element)); + jsonWriter.writeArrayField("embedding", this.embedding, (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of SupportedModels from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of SupportedModels if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the SupportedModels. 
+ */ + @Generated + public static SupportedModels fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + SupportedModels deserializedSupportedModels = new SupportedModels(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("completion".equals(fieldName)) { + List completion = reader.readArray(reader1 -> reader1.getString()); + deserializedSupportedModels.completion = completion; + } else if ("embedding".equals(fieldName)) { + List embedding = reader.readArray(reader1 -> reader1.getString()); + deserializedSupportedModels.embedding = embedding; + } else { + reader.skipChildren(); + } + } + + return deserializedSupportedModels; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TableFormat.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TableFormat.java new file mode 100644 index 000000000000..e662dc679c25 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TableFormat.java @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Representation format of tables in analyze result markdown. + */ +public final class TableFormat extends ExpandableStringEnum { + /** + * Represent tables using HTML table elements: \<table>, \<th>, \<tr>, \<td>. 
+ */ + @Generated + public static final TableFormat HTML = fromString("html"); + + /** + * Represent tables using GitHub Flavored Markdown table syntax, which does not support merged cells or rich + * headers. + */ + @Generated + public static final TableFormat MARKDOWN = fromString("markdown"); + + /** + * Creates a new instance of TableFormat value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Generated + @Deprecated + public TableFormat() { + } + + /** + * Creates or finds a TableFormat from its string representation. + * + * @param name a name to look for. + * @return the corresponding TableFormat. + */ + @Generated + public static TableFormat fromString(String name) { + return fromString(name, TableFormat.class); + } + + /** + * Gets known TableFormat values. + * + * @return known TableFormat values. + */ + @Generated + public static Collection values() { + return values(TableFormat.class); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TimeField.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TimeField.java new file mode 100644 index 000000000000..0308c5c64844 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TimeField.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Time field extracted from the content. 
+ */ +@Immutable +public final class TimeField extends ContentField { + /* + * Semantic data type of the field value. + */ + @Generated + private ContentFieldType type = ContentFieldType.TIME; + + /* + * Time field value, in ISO 8601 (hh:mm:ss) format. + */ + @Generated + private String valueTime; + + /** + * Creates an instance of TimeField class. + */ + @Generated + private TimeField() { + } + + /** + * Get the type property: Semantic data type of the field value. + * + * @return the type value. + */ + @Generated + @Override + public ContentFieldType getType() { + return this.type; + } + + /** + * Get the valueTime property: Time field value, in ISO 8601 (hh:mm:ss) format. + * + * @return the valueTime value. + */ + @Generated + public String getValueTime() { + return this.valueTime; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("spans", getSpans(), (writer, element) -> writer.writeJson(element)); + jsonWriter.writeNumberField("confidence", getConfidence()); + jsonWriter.writeStringField("source", getSource()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeStringField("valueTime", this.valueTime); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of TimeField from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of TimeField if the JsonReader was pointing to an instance of it, or null if it was pointing + * to JSON null. + * @throws IOException If an error occurs while reading the TimeField. 
+ */ + @Generated + public static TimeField fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + TimeField deserializedTimeField = new TimeField(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("spans".equals(fieldName)) { + List spans = reader.readArray(reader1 -> ContentSpan.fromJson(reader1)); + deserializedTimeField.setSpans(spans); + } else if ("confidence".equals(fieldName)) { + deserializedTimeField.setConfidence(reader.getNullable(JsonReader::getDouble)); + } else if ("source".equals(fieldName)) { + deserializedTimeField.setSource(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedTimeField.type = ContentFieldType.fromString(reader.getString()); + } else if ("valueTime".equals(fieldName)) { + deserializedTimeField.valueTime = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedTimeField; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptPhrase.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptPhrase.java new file mode 100644 index 000000000000..2e24725d7cab --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptPhrase.java @@ -0,0 +1,236 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Transcript phrase. + */ +@Immutable +public final class TranscriptPhrase implements JsonSerializable { + /* + * Speaker index or name. + */ + @Generated + private String speaker; + + /* + * Start time of the phrase in milliseconds. + */ + @Generated + private final long startTimeMs; + + /* + * End time of the phrase in milliseconds. + */ + @Generated + private final long endTimeMs; + + /* + * Detected locale of the phrase. Ex. en-US. + */ + @Generated + private String locale; + + /* + * Transcript text. + */ + @Generated + private final String text; + + /* + * Confidence of predicting the phrase. + */ + @Generated + private Double confidence; + + /* + * Span of the phrase in the markdown content. + */ + @Generated + private ContentSpan span; + + /* + * List of words in the phrase. + */ + @Generated + private final List words; + + /** + * Creates an instance of TranscriptPhrase class. + * + * @param startTimeMs the startTimeMs value to set. + * @param endTimeMs the endTimeMs value to set. + * @param text the text value to set. + * @param words the words value to set. + */ + @Generated + private TranscriptPhrase(long startTimeMs, long endTimeMs, String text, List words) { + this.startTimeMs = startTimeMs; + this.endTimeMs = endTimeMs; + this.text = text; + this.words = words; + } + + /** + * Get the speaker property: Speaker index or name. + * + * @return the speaker value. + */ + @Generated + public String getSpeaker() { + return this.speaker; + } + + /** + * Get the startTimeMs property: Start time of the phrase in milliseconds. + * + * @return the startTimeMs value. 
+ */ + @Generated + public long getStartTimeMs() { + return this.startTimeMs; + } + + /** + * Get the endTimeMs property: End time of the phrase in milliseconds. + * + * @return the endTimeMs value. + */ + @Generated + public long getEndTimeMs() { + return this.endTimeMs; + } + + /** + * Get the locale property: Detected locale of the phrase. Ex. en-US. + * + * @return the locale value. + */ + @Generated + public String getLocale() { + return this.locale; + } + + /** + * Get the text property: Transcript text. + * + * @return the text value. + */ + @Generated + public String getText() { + return this.text; + } + + /** + * Get the confidence property: Confidence of predicting the phrase. + * + * @return the confidence value. + */ + @Generated + public Double getConfidence() { + return this.confidence; + } + + /** + * Get the span property: Span of the phrase in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * Get the words property: List of words in the phrase. + * + * @return the words value. + */ + @Generated + public List getWords() { + return this.words; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeLongField("startTimeMs", this.startTimeMs); + jsonWriter.writeLongField("endTimeMs", this.endTimeMs); + jsonWriter.writeStringField("text", this.text); + jsonWriter.writeArrayField("words", this.words, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("speaker", this.speaker); + jsonWriter.writeStringField("locale", this.locale); + jsonWriter.writeNumberField("confidence", this.confidence); + jsonWriter.writeJsonField("span", this.span); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of TranscriptPhrase from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of TranscriptPhrase if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the TranscriptPhrase. + */ + @Generated + public static TranscriptPhrase fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + long startTimeMs = 0L; + long endTimeMs = 0L; + String text = null; + List words = null; + String speaker = null; + String locale = null; + Double confidence = null; + ContentSpan span = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("startTimeMs".equals(fieldName)) { + startTimeMs = reader.getLong(); + } else if ("endTimeMs".equals(fieldName)) { + endTimeMs = reader.getLong(); + } else if ("text".equals(fieldName)) { + text = reader.getString(); + } else if ("words".equals(fieldName)) { + words = reader.readArray(reader1 -> TranscriptWord.fromJson(reader1)); + } else if ("speaker".equals(fieldName)) { + speaker = reader.getString(); + } else if ("locale".equals(fieldName)) { + locale = reader.getString(); + } else if ("confidence".equals(fieldName)) { + confidence = reader.getNullable(JsonReader::getDouble); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else { + reader.skipChildren(); + } + } + TranscriptPhrase deserializedTranscriptPhrase = new TranscriptPhrase(startTimeMs, endTimeMs, text, words); + deserializedTranscriptPhrase.speaker = speaker; + deserializedTranscriptPhrase.locale = locale; + deserializedTranscriptPhrase.confidence = confidence; + deserializedTranscriptPhrase.span = span; + + return deserializedTranscriptPhrase; + }); + } +} diff --git 
a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptWord.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptWord.java new file mode 100644 index 000000000000..b36590b5cc59 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/TranscriptWord.java @@ -0,0 +1,150 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Transcript word. + */ +@Immutable +public final class TranscriptWord implements JsonSerializable { + /* + * Start time of the word in milliseconds. + */ + @Generated + private final long startTimeMs; + + /* + * End time of the word in milliseconds. + */ + @Generated + private final long endTimeMs; + + /* + * Transcript text. + */ + @Generated + private final String text; + + /* + * Span of the word in the markdown content. + */ + @Generated + private ContentSpan span; + + /** + * Creates an instance of TranscriptWord class. + * + * @param startTimeMs the startTimeMs value to set. + * @param endTimeMs the endTimeMs value to set. + * @param text the text value to set. + */ + @Generated + private TranscriptWord(long startTimeMs, long endTimeMs, String text) { + this.startTimeMs = startTimeMs; + this.endTimeMs = endTimeMs; + this.text = text; + } + + /** + * Get the startTimeMs property: Start time of the word in milliseconds. + * + * @return the startTimeMs value. 
+ */ + @Generated + public long getStartTimeMs() { + return this.startTimeMs; + } + + /** + * Get the endTimeMs property: End time of the word in milliseconds. + * + * @return the endTimeMs value. + */ + @Generated + public long getEndTimeMs() { + return this.endTimeMs; + } + + /** + * Get the text property: Transcript text. + * + * @return the text value. + */ + @Generated + public String getText() { + return this.text; + } + + /** + * Get the span property: Span of the word in the markdown content. + * + * @return the span value. + */ + @Generated + public ContentSpan getSpan() { + return this.span; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeLongField("startTimeMs", this.startTimeMs); + jsonWriter.writeLongField("endTimeMs", this.endTimeMs); + jsonWriter.writeStringField("text", this.text); + jsonWriter.writeJsonField("span", this.span); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of TranscriptWord from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of TranscriptWord if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the TranscriptWord. 
+ */ + @Generated + public static TranscriptWord fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + long startTimeMs = 0L; + long endTimeMs = 0L; + String text = null; + ContentSpan span = null; + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("startTimeMs".equals(fieldName)) { + startTimeMs = reader.getLong(); + } else if ("endTimeMs".equals(fieldName)) { + endTimeMs = reader.getLong(); + } else if ("text".equals(fieldName)) { + text = reader.getString(); + } else if ("span".equals(fieldName)) { + span = ContentSpan.fromJson(reader); + } else { + reader.skipChildren(); + } + } + TranscriptWord deserializedTranscriptWord = new TranscriptWord(startTimeMs, endTimeMs, text); + deserializedTranscriptWord.span = span; + + return deserializedTranscriptWord; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/UsageDetails.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/UsageDetails.java new file mode 100644 index 000000000000..2ed0a9304da6 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/UsageDetails.java @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.models; + +import com.azure.core.annotation.Generated; +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.Map; + +/** + * Usage details. 
+ */ +@Immutable +public final class UsageDetails implements JsonSerializable { + /* + * The number of document pages processed at the minimal level. + * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + */ + @Generated + private Integer documentPagesMinimal; + + /* + * The number of document pages processed at the basic level. + * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + */ + @Generated + private Integer documentPagesBasic; + + /* + * The number of document pages processed at the standard level. + * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + */ + @Generated + private Integer documentPagesStandard; + + /* + * The hours of audio processed. + */ + @Generated + private Double audioHours; + + /* + * The hours of video processed. + */ + @Generated + private Double videoHours; + + /* + * The number of contextualization tokens consumed for preparing context, generating confidence scores, source + * grounding, and output formatting. + */ + @Generated + private Integer contextualizationTokens; + + /* + * The number of LLM and embedding tokens consumed, grouped by model (ex. GPT-4.1) and type (ex. input, cached + * input, output). + */ + @Generated + private Map tokens; + + /** + * Creates an instance of UsageDetails class. + */ + @Generated + private UsageDetails() { + } + + /** + * Get the documentPagesMinimal property: The number of document pages processed at the minimal level. + * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + * + * @return the documentPagesMinimal value. + */ + @Generated + public Integer getDocumentPagesMinimal() { + return this.documentPagesMinimal; + } + + /** + * Get the documentPagesBasic property: The number of document pages processed at the basic level. 
+ * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + * + * @return the documentPagesBasic value. + */ + @Generated + public Integer getDocumentPagesBasic() { + return this.documentPagesBasic; + } + + /** + * Get the documentPagesStandard property: The number of document pages processed at the standard level. + * For documents without explicit pages (ex. txt, html), every 3000 UTF-16 characters is counted as one page. + * + * @return the documentPagesStandard value. + */ + @Generated + public Integer getDocumentPagesStandard() { + return this.documentPagesStandard; + } + + /** + * Get the audioHours property: The hours of audio processed. + * + * @return the audioHours value. + */ + @Generated + public Double getAudioHours() { + return this.audioHours; + } + + /** + * Get the videoHours property: The hours of video processed. + * + * @return the videoHours value. + */ + @Generated + public Double getVideoHours() { + return this.videoHours; + } + + /** + * Get the contextualizationTokens property: The number of contextualization tokens consumed for preparing context, + * generating confidence scores, source grounding, and output formatting. + * + * @return the contextualizationTokens value. + */ + @Generated + public Integer getContextualizationTokens() { + return this.contextualizationTokens; + } + + /** + * Get the tokens property: The number of LLM and embedding tokens consumed, grouped by model (ex. GPT-4.1) and type + * (ex. input, cached input, output). + * + * @return the tokens value. 
+ */ + @Generated + public Map getTokens() { + return this.tokens; + } + + /** + * {@inheritDoc} + */ + @Generated + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeNumberField("documentPagesMinimal", this.documentPagesMinimal); + jsonWriter.writeNumberField("documentPagesBasic", this.documentPagesBasic); + jsonWriter.writeNumberField("documentPagesStandard", this.documentPagesStandard); + jsonWriter.writeNumberField("audioHours", this.audioHours); + jsonWriter.writeNumberField("videoHours", this.videoHours); + jsonWriter.writeNumberField("contextualizationTokens", this.contextualizationTokens); + jsonWriter.writeMapField("tokens", this.tokens, (writer, element) -> writer.writeInt(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of UsageDetails from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of UsageDetails if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the UsageDetails. 
+ */ + @Generated + public static UsageDetails fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + UsageDetails deserializedUsageDetails = new UsageDetails(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("documentPagesMinimal".equals(fieldName)) { + deserializedUsageDetails.documentPagesMinimal = reader.getNullable(JsonReader::getInt); + } else if ("documentPagesBasic".equals(fieldName)) { + deserializedUsageDetails.documentPagesBasic = reader.getNullable(JsonReader::getInt); + } else if ("documentPagesStandard".equals(fieldName)) { + deserializedUsageDetails.documentPagesStandard = reader.getNullable(JsonReader::getInt); + } else if ("audioHours".equals(fieldName)) { + deserializedUsageDetails.audioHours = reader.getNullable(JsonReader::getDouble); + } else if ("videoHours".equals(fieldName)) { + deserializedUsageDetails.videoHours = reader.getNullable(JsonReader::getDouble); + } else if ("contextualizationTokens".equals(fieldName)) { + deserializedUsageDetails.contextualizationTokens = reader.getNullable(JsonReader::getInt); + } else if ("tokens".equals(fieldName)) { + Map tokens = reader.readMap(reader1 -> reader1.getInt()); + deserializedUsageDetails.tokens = tokens; + } else { + reader.skipChildren(); + } + } + + return deserializedUsageDetails; + }); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/package-info.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/package-info.java new file mode 100644 index 000000000000..4c723c8be33f --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/models/package-info.java @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +/** + * Package containing the data models for ContentUnderstanding. + * The Content Understanding service extracts content and fields from multimodal input. + */ +package com.azure.ai.contentunderstanding.models; diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/package-info.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/package-info.java new file mode 100644 index 000000000000..88d77a4a8c7f --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/com/azure/ai/contentunderstanding/package-info.java @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +/** + * Package containing the classes for ContentUnderstanding. + * The Content Understanding service extracts content and fields from multimodal input. + */ +package com.azure.ai.contentunderstanding; diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/module-info.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/module-info.java new file mode 100644 index 000000000000..c378561457ab --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/java/module-info.java @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +module com.azure.ai.contentunderstanding { + requires transitive com.azure.core; + + exports com.azure.ai.contentunderstanding; + exports com.azure.ai.contentunderstanding.models; + + opens com.azure.ai.contentunderstanding.models to com.azure.core; + opens com.azure.ai.contentunderstanding.implementation.models to com.azure.core; +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_apiview_properties.json b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_apiview_properties.json new file mode 100644 index 000000000000..b1b6154be6f2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_apiview_properties.json @@ -0,0 +1,132 @@ +{ + "flavor": "azure", + "CrossLanguageDefinitionId": { + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient": "ClientCustomizations.ContentUnderstandingClient", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyze": "ClientCustomizations.ContentUnderstandingClient.analyze", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeBinary": "ClientCustomizations.ContentUnderstandingClient.analyzeBinary", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeBinaryWithModel": "ClientCustomizations.ContentUnderstandingClient.analyzeBinary", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeWithModel": "ClientCustomizations.ContentUnderstandingClient.analyze", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCopyAnalyzer": "ClientCustomizations.ContentUnderstandingClient.copyAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCopyAnalyzerWithModel": "ClientCustomizations.ContentUnderstandingClient.copyAnalyzer", + 
"com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCreateAnalyzer": "ClientCustomizations.ContentUnderstandingClient.createAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCreateAnalyzerWithModel": "ClientCustomizations.ContentUnderstandingClient.createAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteAnalyzer": "ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteResult": "ClientCustomizations.ContentUnderstandingClient.deleteResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteResultWithResponse": "ClientCustomizations.ContentUnderstandingClient.deleteResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getAnalyzer": "ClientCustomizations.ContentUnderstandingClient.getAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.getAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getDefaults": "ClientCustomizations.ContentUnderstandingClient.getDefaults", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getDefaultsWithResponse": "ClientCustomizations.ContentUnderstandingClient.getDefaults", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getOperationStatus": "ClientCustomizations.ContentUnderstandingClient.getOperationStatus", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getOperationStatusWithResponse": "ClientCustomizations.ContentUnderstandingClient.getOperationStatus", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResult": 
"ClientCustomizations.ContentUnderstandingClient.getResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultFile": "ClientCustomizations.ContentUnderstandingClient.getResultFile", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultFileWithResponse": "ClientCustomizations.ContentUnderstandingClient.getResultFile", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultWithResponse": "ClientCustomizations.ContentUnderstandingClient.getResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.grantCopyAuthorization": "ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.grantCopyAuthorizationWithResponse": "ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.listAnalyzers": "ClientCustomizations.ContentUnderstandingClient.listAnalyzers", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.updateAnalyzer": "ClientCustomizations.ContentUnderstandingClient.updateAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.updateAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.updateAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient": "ClientCustomizations.ContentUnderstandingClient", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyze": "ClientCustomizations.ContentUnderstandingClient.analyze", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeBinary": "ClientCustomizations.ContentUnderstandingClient.analyzeBinary", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeBinaryWithModel": "ClientCustomizations.ContentUnderstandingClient.analyzeBinary", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeWithModel": 
"ClientCustomizations.ContentUnderstandingClient.analyze", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCopyAnalyzer": "ClientCustomizations.ContentUnderstandingClient.copyAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCopyAnalyzerWithModel": "ClientCustomizations.ContentUnderstandingClient.copyAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCreateAnalyzer": "ClientCustomizations.ContentUnderstandingClient.createAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCreateAnalyzerWithModel": "ClientCustomizations.ContentUnderstandingClient.createAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteAnalyzer": "ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteResult": "ClientCustomizations.ContentUnderstandingClient.deleteResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteResultWithResponse": "ClientCustomizations.ContentUnderstandingClient.deleteResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getAnalyzer": "ClientCustomizations.ContentUnderstandingClient.getAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.getAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getDefaults": "ClientCustomizations.ContentUnderstandingClient.getDefaults", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getDefaultsWithResponse": "ClientCustomizations.ContentUnderstandingClient.getDefaults", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getOperationStatus": 
"ClientCustomizations.ContentUnderstandingClient.getOperationStatus", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getOperationStatusWithResponse": "ClientCustomizations.ContentUnderstandingClient.getOperationStatus", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResult": "ClientCustomizations.ContentUnderstandingClient.getResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultFile": "ClientCustomizations.ContentUnderstandingClient.getResultFile", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultFileWithResponse": "ClientCustomizations.ContentUnderstandingClient.getResultFile", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultWithResponse": "ClientCustomizations.ContentUnderstandingClient.getResult", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.grantCopyAuthorization": "ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.grantCopyAuthorizationWithResponse": "ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.listAnalyzers": "ClientCustomizations.ContentUnderstandingClient.listAnalyzers", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.updateAnalyzer": "ClientCustomizations.ContentUnderstandingClient.updateAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClient.updateAnalyzerWithResponse": "ClientCustomizations.ContentUnderstandingClient.updateAnalyzer", + "com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder": "ClientCustomizations.ContentUnderstandingClient", + "com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1": "ClientCustomizations.analyze.Request.anonymous", + "com.azure.ai.contentunderstanding.implementation.models.CopyAnalyzerRequest": 
"ClientCustomizations.copyAnalyzer.Request.anonymous", + "com.azure.ai.contentunderstanding.implementation.models.GrantCopyAuthorizationRequest1": "ClientCustomizations.grantCopyAuthorization.Request.anonymous", + "com.azure.ai.contentunderstanding.models.AnalyzeInput": "ContentUnderstanding.AnalyzeInput", + "com.azure.ai.contentunderstanding.models.AnalyzeResult": "ContentUnderstanding.AnalyzeResult", + "com.azure.ai.contentunderstanding.models.AnnotationFormat": "ContentUnderstanding.AnnotationFormat", + "com.azure.ai.contentunderstanding.models.ArrayField": "ContentUnderstanding.ArrayField", + "com.azure.ai.contentunderstanding.models.AudioVisualContent": "ContentUnderstanding.AudioVisualContent", + "com.azure.ai.contentunderstanding.models.AudioVisualContentSegment": "ContentUnderstanding.AudioVisualContentSegment", + "com.azure.ai.contentunderstanding.models.BooleanField": "ContentUnderstanding.BooleanField", + "com.azure.ai.contentunderstanding.models.ChartFormat": "ContentUnderstanding.ChartFormat", + "com.azure.ai.contentunderstanding.models.ContentAnalyzer": "ContentUnderstanding.ContentAnalyzer", + "com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus": "ContentUnderstanding.ContentAnalyzerAnalyzeOperationStatus", + "com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig": "ContentUnderstanding.ContentAnalyzerConfig", + "com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus": "ContentUnderstanding.ContentAnalyzerOperationStatus", + "com.azure.ai.contentunderstanding.models.ContentAnalyzerStatus": "ContentUnderstanding.ContentAnalyzerStatus", + "com.azure.ai.contentunderstanding.models.ContentCategoryDefinition": "ContentUnderstanding.ContentCategoryDefinition", + "com.azure.ai.contentunderstanding.models.ContentField": "ContentUnderstanding.ContentField", + "com.azure.ai.contentunderstanding.models.ContentFieldDefinition": "ContentUnderstanding.ContentFieldDefinition", + 
"com.azure.ai.contentunderstanding.models.ContentFieldSchema": "ContentUnderstanding.FieldSchema", + "com.azure.ai.contentunderstanding.models.ContentFieldType": "ContentUnderstanding.ContentFieldType", + "com.azure.ai.contentunderstanding.models.ContentSpan": "ContentUnderstanding.ContentSpan", + "com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults": "ContentUnderstanding.ContentUnderstandingDefaults", + "com.azure.ai.contentunderstanding.models.CopyAuthorization": "ContentUnderstanding.CopyAuthorization", + "com.azure.ai.contentunderstanding.models.DateField": "ContentUnderstanding.DateField", + "com.azure.ai.contentunderstanding.models.DocumentAnnotation": "ContentUnderstanding.DocumentAnnotation", + "com.azure.ai.contentunderstanding.models.DocumentAnnotationComment": "ContentUnderstanding.DocumentAnnotationComment", + "com.azure.ai.contentunderstanding.models.DocumentAnnotationKind": "ContentUnderstanding.DocumentAnnotationKind", + "com.azure.ai.contentunderstanding.models.DocumentBarcode": "ContentUnderstanding.DocumentBarcode", + "com.azure.ai.contentunderstanding.models.DocumentBarcodeKind": "ContentUnderstanding.DocumentBarcodeKind", + "com.azure.ai.contentunderstanding.models.DocumentCaption": "ContentUnderstanding.DocumentCaption", + "com.azure.ai.contentunderstanding.models.DocumentChartFigure": "ContentUnderstanding.DocumentChartFigure", + "com.azure.ai.contentunderstanding.models.DocumentContent": "ContentUnderstanding.DocumentContent", + "com.azure.ai.contentunderstanding.models.DocumentContentSegment": "ContentUnderstanding.DocumentContentSegment", + "com.azure.ai.contentunderstanding.models.DocumentFigure": "ContentUnderstanding.DocumentFigure", + "com.azure.ai.contentunderstanding.models.DocumentFigureKind": "ContentUnderstanding.DocumentFigureKind", + "com.azure.ai.contentunderstanding.models.DocumentFootnote": "ContentUnderstanding.DocumentFootnote", + "com.azure.ai.contentunderstanding.models.DocumentFormula": 
"ContentUnderstanding.DocumentFormula", + "com.azure.ai.contentunderstanding.models.DocumentFormulaKind": "ContentUnderstanding.DocumentFormulaKind", + "com.azure.ai.contentunderstanding.models.DocumentHyperlink": "ContentUnderstanding.DocumentHyperlink", + "com.azure.ai.contentunderstanding.models.DocumentLine": "ContentUnderstanding.DocumentLine", + "com.azure.ai.contentunderstanding.models.DocumentMermaidFigure": "ContentUnderstanding.DocumentMermaidFigure", + "com.azure.ai.contentunderstanding.models.DocumentPage": "ContentUnderstanding.DocumentPage", + "com.azure.ai.contentunderstanding.models.DocumentParagraph": "ContentUnderstanding.DocumentParagraph", + "com.azure.ai.contentunderstanding.models.DocumentSection": "ContentUnderstanding.DocumentSection", + "com.azure.ai.contentunderstanding.models.DocumentTable": "ContentUnderstanding.DocumentTable", + "com.azure.ai.contentunderstanding.models.DocumentTableCell": "ContentUnderstanding.DocumentTableCell", + "com.azure.ai.contentunderstanding.models.DocumentTableCellKind": "ContentUnderstanding.DocumentTableCellKind", + "com.azure.ai.contentunderstanding.models.DocumentWord": "ContentUnderstanding.DocumentWord", + "com.azure.ai.contentunderstanding.models.GenerationMethod": "ContentUnderstanding.GenerationMethod", + "com.azure.ai.contentunderstanding.models.IntegerField": "ContentUnderstanding.IntegerField", + "com.azure.ai.contentunderstanding.models.JsonField": "ContentUnderstanding.JsonField", + "com.azure.ai.contentunderstanding.models.KnowledgeSource": "ContentUnderstanding.KnowledgeSource", + "com.azure.ai.contentunderstanding.models.KnowledgeSourceKind": "ContentUnderstanding.KnowledgeSourceKind", + "com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource": "ContentUnderstanding.LabeledDataKnowledgeSource", + "com.azure.ai.contentunderstanding.models.LengthUnit": "ContentUnderstanding.LengthUnit", + "com.azure.ai.contentunderstanding.models.MediaContent": "ContentUnderstanding.MediaContent", + 
"com.azure.ai.contentunderstanding.models.MediaContentKind": "ContentUnderstanding.MediaContentKind", + "com.azure.ai.contentunderstanding.models.NumberField": "ContentUnderstanding.NumberField", + "com.azure.ai.contentunderstanding.models.ObjectField": "ContentUnderstanding.ObjectField", + "com.azure.ai.contentunderstanding.models.OperationState": "Azure.Core.Foundations.OperationState", + "com.azure.ai.contentunderstanding.models.ProcessingLocation": "ContentUnderstanding.ProcessingLocation", + "com.azure.ai.contentunderstanding.models.SemanticRole": "ContentUnderstanding.SemanticRole", + "com.azure.ai.contentunderstanding.models.StringField": "ContentUnderstanding.StringField", + "com.azure.ai.contentunderstanding.models.SupportedModels": "ContentUnderstanding.SupportedModels", + "com.azure.ai.contentunderstanding.models.TableFormat": "ContentUnderstanding.TableFormat", + "com.azure.ai.contentunderstanding.models.TimeField": "ContentUnderstanding.TimeField", + "com.azure.ai.contentunderstanding.models.TranscriptPhrase": "ContentUnderstanding.TranscriptPhrase", + "com.azure.ai.contentunderstanding.models.TranscriptWord": "ContentUnderstanding.TranscriptWord", + "com.azure.ai.contentunderstanding.models.UsageDetails": "ContentUnderstanding.UsageDetails" + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_metadata.json b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_metadata.json new file mode 100644 index 000000000000..f00b262922d9 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/META-INF/azure-ai-contentunderstanding_metadata.json @@ -0,0 +1 @@ 
+{"flavor":"azure","apiVersion":"2025-11-01","crossLanguageDefinitions":{"com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient":"ClientCustomizations.ContentUnderstandingClient","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyze":"ClientCustomizations.ContentUnderstandingClient.analyze","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeBinary":"ClientCustomizations.ContentUnderstandingClient.analyzeBinary","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeBinaryWithModel":"ClientCustomizations.ContentUnderstandingClient.analyzeBinary","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginAnalyzeWithModel":"ClientCustomizations.ContentUnderstandingClient.analyze","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCopyAnalyzer":"ClientCustomizations.ContentUnderstandingClient.copyAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCopyAnalyzerWithModel":"ClientCustomizations.ContentUnderstandingClient.copyAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCreateAnalyzer":"ClientCustomizations.ContentUnderstandingClient.createAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.beginCreateAnalyzerWithModel":"ClientCustomizations.ContentUnderstandingClient.createAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteAnalyzer":"ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteResult":"ClientCustomizations.ContentUnderstandingClient.deleteResult","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.deleteResultWithResponse":"ClientCustomizations.ContentUnderstandingCl
ient.deleteResult","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getAnalyzer":"ClientCustomizations.ContentUnderstandingClient.getAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.getAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getDefaults":"ClientCustomizations.ContentUnderstandingClient.getDefaults","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getDefaultsWithResponse":"ClientCustomizations.ContentUnderstandingClient.getDefaults","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getOperationStatus":"ClientCustomizations.ContentUnderstandingClient.getOperationStatus","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getOperationStatusWithResponse":"ClientCustomizations.ContentUnderstandingClient.getOperationStatus","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResult":"ClientCustomizations.ContentUnderstandingClient.getResult","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultFile":"ClientCustomizations.ContentUnderstandingClient.getResultFile","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultFileWithResponse":"ClientCustomizations.ContentUnderstandingClient.getResultFile","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.getResultWithResponse":"ClientCustomizations.ContentUnderstandingClient.getResult","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.grantCopyAuthorization":"ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.grantCopyAuthorizationWithResponse":"ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.listAnalyzers":"ClientCustomizations.ContentUnderstandingClient.li
stAnalyzers","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.updateAnalyzer":"ClientCustomizations.ContentUnderstandingClient.updateAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient.updateAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.updateAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient":"ClientCustomizations.ContentUnderstandingClient","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyze":"ClientCustomizations.ContentUnderstandingClient.analyze","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeBinary":"ClientCustomizations.ContentUnderstandingClient.analyzeBinary","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeBinaryWithModel":"ClientCustomizations.ContentUnderstandingClient.analyzeBinary","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginAnalyzeWithModel":"ClientCustomizations.ContentUnderstandingClient.analyze","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCopyAnalyzer":"ClientCustomizations.ContentUnderstandingClient.copyAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCopyAnalyzerWithModel":"ClientCustomizations.ContentUnderstandingClient.copyAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCreateAnalyzer":"ClientCustomizations.ContentUnderstandingClient.createAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.beginCreateAnalyzerWithModel":"ClientCustomizations.ContentUnderstandingClient.createAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteAnalyzer":"ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.deleteAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteResult":"ClientCusto
mizations.ContentUnderstandingClient.deleteResult","com.azure.ai.contentunderstanding.ContentUnderstandingClient.deleteResultWithResponse":"ClientCustomizations.ContentUnderstandingClient.deleteResult","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getAnalyzer":"ClientCustomizations.ContentUnderstandingClient.getAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.getAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getDefaults":"ClientCustomizations.ContentUnderstandingClient.getDefaults","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getDefaultsWithResponse":"ClientCustomizations.ContentUnderstandingClient.getDefaults","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getOperationStatus":"ClientCustomizations.ContentUnderstandingClient.getOperationStatus","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getOperationStatusWithResponse":"ClientCustomizations.ContentUnderstandingClient.getOperationStatus","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResult":"ClientCustomizations.ContentUnderstandingClient.getResult","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultFile":"ClientCustomizations.ContentUnderstandingClient.getResultFile","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultFileWithResponse":"ClientCustomizations.ContentUnderstandingClient.getResultFile","com.azure.ai.contentunderstanding.ContentUnderstandingClient.getResultWithResponse":"ClientCustomizations.ContentUnderstandingClient.getResult","com.azure.ai.contentunderstanding.ContentUnderstandingClient.grantCopyAuthorization":"ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization","com.azure.ai.contentunderstanding.ContentUnderstandingClient.grantCopyAuthorizationWithResponse":"ClientCustomizations.ContentUnderstandingClient.grantCopyAuthorization","com.azure
.ai.contentunderstanding.ContentUnderstandingClient.listAnalyzers":"ClientCustomizations.ContentUnderstandingClient.listAnalyzers","com.azure.ai.contentunderstanding.ContentUnderstandingClient.updateAnalyzer":"ClientCustomizations.ContentUnderstandingClient.updateAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClient.updateAnalyzerWithResponse":"ClientCustomizations.ContentUnderstandingClient.updateAnalyzer","com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder":"ClientCustomizations.ContentUnderstandingClient","com.azure.ai.contentunderstanding.implementation.models.AnalyzeRequest1":"ClientCustomizations.analyze.Request.anonymous","com.azure.ai.contentunderstanding.implementation.models.CopyAnalyzerRequest":"ClientCustomizations.copyAnalyzer.Request.anonymous","com.azure.ai.contentunderstanding.implementation.models.GrantCopyAuthorizationRequest1":"ClientCustomizations.grantCopyAuthorization.Request.anonymous","com.azure.ai.contentunderstanding.models.AnalyzeInput":"ContentUnderstanding.AnalyzeInput","com.azure.ai.contentunderstanding.models.AnalyzeResult":"ContentUnderstanding.AnalyzeResult","com.azure.ai.contentunderstanding.models.AnnotationFormat":"ContentUnderstanding.AnnotationFormat","com.azure.ai.contentunderstanding.models.ArrayField":"ContentUnderstanding.ArrayField","com.azure.ai.contentunderstanding.models.AudioVisualContent":"ContentUnderstanding.AudioVisualContent","com.azure.ai.contentunderstanding.models.AudioVisualContentSegment":"ContentUnderstanding.AudioVisualContentSegment","com.azure.ai.contentunderstanding.models.BooleanField":"ContentUnderstanding.BooleanField","com.azure.ai.contentunderstanding.models.ChartFormat":"ContentUnderstanding.ChartFormat","com.azure.ai.contentunderstanding.models.ContentAnalyzer":"ContentUnderstanding.ContentAnalyzer","com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus":"ContentUnderstanding.ContentAnalyzerAnalyzeOperationStatus","com.azure.ai.contentunde
rstanding.models.ContentAnalyzerConfig":"ContentUnderstanding.ContentAnalyzerConfig","com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus":"ContentUnderstanding.ContentAnalyzerOperationStatus","com.azure.ai.contentunderstanding.models.ContentAnalyzerStatus":"ContentUnderstanding.ContentAnalyzerStatus","com.azure.ai.contentunderstanding.models.ContentCategoryDefinition":"ContentUnderstanding.ContentCategoryDefinition","com.azure.ai.contentunderstanding.models.ContentField":"ContentUnderstanding.ContentField","com.azure.ai.contentunderstanding.models.ContentFieldDefinition":"ContentUnderstanding.ContentFieldDefinition","com.azure.ai.contentunderstanding.models.ContentFieldSchema":"ContentUnderstanding.FieldSchema","com.azure.ai.contentunderstanding.models.ContentFieldType":"ContentUnderstanding.ContentFieldType","com.azure.ai.contentunderstanding.models.ContentSpan":"ContentUnderstanding.ContentSpan","com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults":"ContentUnderstanding.ContentUnderstandingDefaults","com.azure.ai.contentunderstanding.models.CopyAuthorization":"ContentUnderstanding.CopyAuthorization","com.azure.ai.contentunderstanding.models.DateField":"ContentUnderstanding.DateField","com.azure.ai.contentunderstanding.models.DocumentAnnotation":"ContentUnderstanding.DocumentAnnotation","com.azure.ai.contentunderstanding.models.DocumentAnnotationComment":"ContentUnderstanding.DocumentAnnotationComment","com.azure.ai.contentunderstanding.models.DocumentAnnotationKind":"ContentUnderstanding.DocumentAnnotationKind","com.azure.ai.contentunderstanding.models.DocumentBarcode":"ContentUnderstanding.DocumentBarcode","com.azure.ai.contentunderstanding.models.DocumentBarcodeKind":"ContentUnderstanding.DocumentBarcodeKind","com.azure.ai.contentunderstanding.models.DocumentCaption":"ContentUnderstanding.DocumentCaption","com.azure.ai.contentunderstanding.models.DocumentChartFigure":"ContentUnderstanding.DocumentChartFigure","com.azure.ai.
contentunderstanding.models.DocumentContent":"ContentUnderstanding.DocumentContent","com.azure.ai.contentunderstanding.models.DocumentContentSegment":"ContentUnderstanding.DocumentContentSegment","com.azure.ai.contentunderstanding.models.DocumentFigure":"ContentUnderstanding.DocumentFigure","com.azure.ai.contentunderstanding.models.DocumentFigureKind":"ContentUnderstanding.DocumentFigureKind","com.azure.ai.contentunderstanding.models.DocumentFootnote":"ContentUnderstanding.DocumentFootnote","com.azure.ai.contentunderstanding.models.DocumentFormula":"ContentUnderstanding.DocumentFormula","com.azure.ai.contentunderstanding.models.DocumentFormulaKind":"ContentUnderstanding.DocumentFormulaKind","com.azure.ai.contentunderstanding.models.DocumentHyperlink":"ContentUnderstanding.DocumentHyperlink","com.azure.ai.contentunderstanding.models.DocumentLine":"ContentUnderstanding.DocumentLine","com.azure.ai.contentunderstanding.models.DocumentMermaidFigure":"ContentUnderstanding.DocumentMermaidFigure","com.azure.ai.contentunderstanding.models.DocumentPage":"ContentUnderstanding.DocumentPage","com.azure.ai.contentunderstanding.models.DocumentParagraph":"ContentUnderstanding.DocumentParagraph","com.azure.ai.contentunderstanding.models.DocumentSection":"ContentUnderstanding.DocumentSection","com.azure.ai.contentunderstanding.models.DocumentTable":"ContentUnderstanding.DocumentTable","com.azure.ai.contentunderstanding.models.DocumentTableCell":"ContentUnderstanding.DocumentTableCell","com.azure.ai.contentunderstanding.models.DocumentTableCellKind":"ContentUnderstanding.DocumentTableCellKind","com.azure.ai.contentunderstanding.models.DocumentWord":"ContentUnderstanding.DocumentWord","com.azure.ai.contentunderstanding.models.GenerationMethod":"ContentUnderstanding.GenerationMethod","com.azure.ai.contentunderstanding.models.IntegerField":"ContentUnderstanding.IntegerField","com.azure.ai.contentunderstanding.models.JsonField":"ContentUnderstanding.JsonField","com.azure.ai.contentunderst
anding.models.KnowledgeSource":"ContentUnderstanding.KnowledgeSource","com.azure.ai.contentunderstanding.models.KnowledgeSourceKind":"ContentUnderstanding.KnowledgeSourceKind","com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource":"ContentUnderstanding.LabeledDataKnowledgeSource","com.azure.ai.contentunderstanding.models.LengthUnit":"ContentUnderstanding.LengthUnit","com.azure.ai.contentunderstanding.models.MediaContent":"ContentUnderstanding.MediaContent","com.azure.ai.contentunderstanding.models.MediaContentKind":"ContentUnderstanding.MediaContentKind","com.azure.ai.contentunderstanding.models.NumberField":"ContentUnderstanding.NumberField","com.azure.ai.contentunderstanding.models.ObjectField":"ContentUnderstanding.ObjectField","com.azure.ai.contentunderstanding.models.OperationState":"Azure.Core.Foundations.OperationState","com.azure.ai.contentunderstanding.models.ProcessingLocation":"ContentUnderstanding.ProcessingLocation","com.azure.ai.contentunderstanding.models.SemanticRole":"ContentUnderstanding.SemanticRole","com.azure.ai.contentunderstanding.models.StringField":"ContentUnderstanding.StringField","com.azure.ai.contentunderstanding.models.SupportedModels":"ContentUnderstanding.SupportedModels","com.azure.ai.contentunderstanding.models.TableFormat":"ContentUnderstanding.TableFormat","com.azure.ai.contentunderstanding.models.TimeField":"ContentUnderstanding.TimeField","com.azure.ai.contentunderstanding.models.TranscriptPhrase":"ContentUnderstanding.TranscriptPhrase","com.azure.ai.contentunderstanding.models.TranscriptWord":"ContentUnderstanding.TranscriptWord","com.azure.ai.contentunderstanding.models.UsageDetails":"ContentUnderstanding.UsageDetails"},"generatedFiles":["src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingAsyncClient.java","src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClient.java","src/main/java/com/azure/ai/contentunderstanding/ContentUnderstandingClientBuilder.java","src/main/java/com/az
ure/ai/contentunderstanding/ContentUnderstandingServiceVersion.java","src/main/java/com/azure/ai/contentunderstanding/implementation/ContentUnderstandingClientImpl.java","src/main/java/com/azure/ai/contentunderstanding/implementation/JsonMergePatchHelper.java","src/main/java/com/azure/ai/contentunderstanding/implementation/OperationLocationPollingStrategy.java","src/main/java/com/azure/ai/contentunderstanding/implementation/PollingUtils.java","src/main/java/com/azure/ai/contentunderstanding/implementation/SyncOperationLocationPollingStrategy.java","src/main/java/com/azure/ai/contentunderstanding/implementation/models/AnalyzeRequest1.java","src/main/java/com/azure/ai/contentunderstanding/implementation/models/CopyAnalyzerRequest.java","src/main/java/com/azure/ai/contentunderstanding/implementation/models/GrantCopyAuthorizationRequest1.java","src/main/java/com/azure/ai/contentunderstanding/implementation/models/package-info.java","src/main/java/com/azure/ai/contentunderstanding/implementation/package-info.java","src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeInput.java","src/main/java/com/azure/ai/contentunderstanding/models/AnalyzeResult.java","src/main/java/com/azure/ai/contentunderstanding/models/AnnotationFormat.java","src/main/java/com/azure/ai/contentunderstanding/models/ArrayField.java","src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContent.java","src/main/java/com/azure/ai/contentunderstanding/models/AudioVisualContentSegment.java","src/main/java/com/azure/ai/contentunderstanding/models/BooleanField.java","src/main/java/com/azure/ai/contentunderstanding/models/ChartFormat.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzer.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerAnalyzeOperationStatus.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerConfig.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerOperationStatus.java"
,"src/main/java/com/azure/ai/contentunderstanding/models/ContentAnalyzerStatus.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentCategoryDefinition.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentField.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentFieldDefinition.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentFieldSchema.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentFieldType.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentSpan.java","src/main/java/com/azure/ai/contentunderstanding/models/ContentUnderstandingDefaults.java","src/main/java/com/azure/ai/contentunderstanding/models/CopyAuthorization.java","src/main/java/com/azure/ai/contentunderstanding/models/DateField.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotation.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationComment.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentAnnotationKind.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentBarcode.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentBarcodeKind.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentCaption.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentChartFigure.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentContent.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentContentSegment.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigure.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentFigureKind.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentFootnote.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormula.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentFormulaKind.java","src/main/java/com/azure/ai/contentunderstanding/models/
DocumentHyperlink.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentLine.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentMermaidFigure.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentPage.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentParagraph.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentSection.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentTable.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCell.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentTableCellKind.java","src/main/java/com/azure/ai/contentunderstanding/models/DocumentWord.java","src/main/java/com/azure/ai/contentunderstanding/models/GenerationMethod.java","src/main/java/com/azure/ai/contentunderstanding/models/IntegerField.java","src/main/java/com/azure/ai/contentunderstanding/models/JsonField.java","src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSource.java","src/main/java/com/azure/ai/contentunderstanding/models/KnowledgeSourceKind.java","src/main/java/com/azure/ai/contentunderstanding/models/LabeledDataKnowledgeSource.java","src/main/java/com/azure/ai/contentunderstanding/models/LengthUnit.java","src/main/java/com/azure/ai/contentunderstanding/models/MediaContent.java","src/main/java/com/azure/ai/contentunderstanding/models/MediaContentKind.java","src/main/java/com/azure/ai/contentunderstanding/models/NumberField.java","src/main/java/com/azure/ai/contentunderstanding/models/ObjectField.java","src/main/java/com/azure/ai/contentunderstanding/models/OperationState.java","src/main/java/com/azure/ai/contentunderstanding/models/ProcessingLocation.java","src/main/java/com/azure/ai/contentunderstanding/models/SemanticRole.java","src/main/java/com/azure/ai/contentunderstanding/models/StringField.java","src/main/java/com/azure/ai/contentunderstanding/models/SupportedModels.java","src/main/java/com/azure/ai/conten
tunderstanding/models/TableFormat.java","src/main/java/com/azure/ai/contentunderstanding/models/TimeField.java","src/main/java/com/azure/ai/contentunderstanding/models/TranscriptPhrase.java","src/main/java/com/azure/ai/contentunderstanding/models/TranscriptWord.java","src/main/java/com/azure/ai/contentunderstanding/models/UsageDetails.java","src/main/java/com/azure/ai/contentunderstanding/models/package-info.java","src/main/java/com/azure/ai/contentunderstanding/package-info.java","src/main/java/module-info.java"]} \ No newline at end of file diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/azure-ai-contentunderstanding.properties b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/azure-ai-contentunderstanding.properties new file mode 100644 index 000000000000..ca812989b4f2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/main/resources/azure-ai-contentunderstanding.properties @@ -0,0 +1,2 @@ +name=${project.artifactId} +version=${project.version} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaults.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaults.java new file mode 100644 index 000000000000..671e65689898 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaults.java @@ -0,0 +1,117 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.HashMap; +import java.util.Map; + +/** + * Sample demonstrating how to configure and manage default settings for Content Understanding service. + * This sample shows: + * 1. Getting current default configuration + * 2. Updating default configuration with your model deployments + * 3. Verifying the updated configuration + * + *

+ * <p>Prerequisites:</p>
+ *
+ * <p>Before running this sample, make sure you have:</p>
+ *
+ * <ol>
+ *   <li>Created a Microsoft Foundry resource (see README.md)</li>
+ *   <li>Deployed the required models (gpt-4.1, gpt-4.1-mini, text-embedding-3-large)</li>
+ *   <li>Set the environment variables:
+ *     <ul>
+ *       <li>{@code CONTENTUNDERSTANDING_ENDPOINT} - Your Foundry resource endpoint</li>
+ *       <li>{@code CONTENTUNDERSTANDING_KEY} - (Optional) Your API key</li>
+ *       <li>{@code GPT_4_1_DEPLOYMENT} - Your GPT-4.1 deployment name</li>
+ *       <li>{@code GPT_4_1_MINI_DEPLOYMENT} - Your GPT-4.1-mini deployment name</li>
+ *       <li>{@code TEXT_EMBEDDING_3_LARGE_DEPLOYMENT} - Your text-embedding-3-large deployment name</li>
+ *     </ul>
+ *   </li>
+ * </ol>
+ *
+ * <p>This sample demonstrates the one-time setup required to map your deployed models
+ * to those required by prebuilt and custom analyzers.</p>
+ */ +public class Sample00_UpdateDefaults { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample00.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample00.buildClient + + // Step 1: Get current defaults to see what's configured + System.out.println("Getting current default configuration..."); + ContentUnderstandingDefaults currentDefaults = client.getDefaults(); + System.out.println("Current defaults retrieved successfully."); + System.out.println("Current model deployments: " + currentDefaults.getModelDeployments()); + + // Step 2: Configure model deployments from environment variables + // These map model names to your deployed model names in Azure AI Foundry + System.out.println("\nConfiguring model deployments from environment variables..."); + + // Get deployment names from environment variables + String gpt41Deployment = getEnvOrDefault("GPT_4_1_DEPLOYMENT", "gpt-4.1"); + String gpt41MiniDeployment = getEnvOrDefault("GPT_4_1_MINI_DEPLOYMENT", "gpt-4.1-mini"); + String textEmbedding3LargeDeployment + = getEnvOrDefault("TEXT_EMBEDDING_3_LARGE_DEPLOYMENT", "text-embedding-3-large"); + + // Create model deployments map + Map modelDeployments = new HashMap<>(); + modelDeployments.put("gpt-4.1", gpt41Deployment); + modelDeployments.put("gpt-4.1-mini", 
gpt41MiniDeployment); + modelDeployments.put("text-embedding-3-large", textEmbedding3LargeDeployment); + + System.out.println("Model deployments to configure:"); + System.out.println(" gpt-4.1 -> " + gpt41Deployment); + System.out.println(" gpt-4.1-mini -> " + gpt41MiniDeployment); + System.out.println(" text-embedding-3-large -> " + textEmbedding3LargeDeployment); + + // Step 3: Update defaults with the new configuration + System.out.println("\nUpdating default configuration..."); + + // Update defaults with the configuration using the typed convenience method + ContentUnderstandingDefaults updatedConfig = client.updateDefaults(modelDeployments); + System.out.println("Defaults updated successfully."); + System.out.println("Updated model deployments: " + updatedConfig.getModelDeployments()); + + // Step 4: Verify the updated configuration + System.out.println("\nVerifying updated configuration..."); + ContentUnderstandingDefaults updatedDefaults = client.getDefaults(); + System.out.println("Updated defaults verified successfully."); + System.out.println("Updated model deployments: " + updatedDefaults.getModelDeployments()); + + System.out.println("\nConfiguration management completed."); + } + + /** + * Gets an environment variable value or returns a default value if not set. + * + * @param envVar the environment variable name + * @param defaultValue the default value to return if the environment variable is not set + * @return the environment variable value or the default value + */ + private static String getEnvOrDefault(String envVar, String defaultValue) { + String value = System.getenv(envVar); + return (value != null && !value.trim().isEmpty()) ? 
value : defaultValue; + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaultsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaultsAsync.java new file mode 100644 index 000000000000..ce6a1b66512d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample00_UpdateDefaultsAsync.java @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrating how to configure and manage default settings for Content Understanding service. + * This sample shows: + * 1. Getting current default configuration + * 2. Updating default configuration with your model deployments + * 3. Verifying the updated configuration + * + *

+ * <p>Prerequisites:</p>
+ *
+ * <p>Before running this sample, make sure you have:</p>
+ *
+ * <ol>
+ *   <li>Created a Microsoft Foundry resource (see README.md)</li>
+ *   <li>Deployed the required models (gpt-4.1, gpt-4.1-mini, text-embedding-3-large)</li>
+ *   <li>Set the environment variables:
+ *     <ul>
+ *       <li>{@code CONTENTUNDERSTANDING_ENDPOINT} - Your Foundry resource endpoint</li>
+ *       <li>{@code CONTENTUNDERSTANDING_KEY} - (Optional) Your API key</li>
+ *       <li>{@code GPT_4_1_DEPLOYMENT} - Your GPT-4.1 deployment name</li>
+ *       <li>{@code GPT_4_1_MINI_DEPLOYMENT} - Your GPT-4.1-mini deployment name</li>
+ *       <li>{@code TEXT_EMBEDDING_3_LARGE_DEPLOYMENT} - Your text-embedding-3-large deployment name</li>
+ *     </ul>
+ *   </li>
+ * </ol>
+ *
+ * <p>This sample demonstrates the one-time setup required to map your deployed models
+ * to those required by prebuilt and custom analyzers.</p>
+ */ +public class Sample00_UpdateDefaultsAsync { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample00Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample00Async.buildClient + + // Step 1: Get current defaults to see what's configured + System.out.println("Getting current default configuration..."); + + // Chain all operations reactively + client.getDefaults() + .doOnNext(currentDefaults -> { + System.out.println("Current defaults retrieved successfully."); + System.out.println("Current model deployments: " + currentDefaults.getModelDeployments()); + }) + .flatMap(currentDefaults -> { + // Step 2: Configure model deployments from environment variables + // These map model names to your deployed model names in Azure AI Foundry + System.out.println("\nConfiguring model deployments from environment variables..."); + + // Get deployment names from environment variables + String gpt41Deployment = getEnvOrDefault("GPT_4_1_DEPLOYMENT", "gpt-4.1"); + String gpt41MiniDeployment = getEnvOrDefault("GPT_4_1_MINI_DEPLOYMENT", "gpt-4.1-mini"); + String textEmbedding3LargeDeployment + = getEnvOrDefault("TEXT_EMBEDDING_3_LARGE_DEPLOYMENT", "text-embedding-3-large"); + + // Create model deployments map + Map modelDeployments = new HashMap<>(); + 
modelDeployments.put("gpt-4.1", gpt41Deployment); + modelDeployments.put("gpt-4.1-mini", gpt41MiniDeployment); + modelDeployments.put("text-embedding-3-large", textEmbedding3LargeDeployment); + + System.out.println("Model deployments to configure:"); + System.out.println(" gpt-4.1 -> " + gpt41Deployment); + System.out.println(" gpt-4.1-mini -> " + gpt41MiniDeployment); + System.out.println(" text-embedding-3-large -> " + textEmbedding3LargeDeployment); + + // Step 3: Update defaults with the new configuration + System.out.println("\nUpdating default configuration..."); + return client.updateDefaults(modelDeployments); + }) + .doOnNext(updatedConfig -> { + System.out.println("Defaults updated successfully."); + System.out.println("Updated model deployments: " + updatedConfig.getModelDeployments()); + }) + .flatMap(updatedConfig -> { + // Step 4: Verify the updated configuration + System.out.println("\nVerifying updated configuration..."); + return client.getDefaults(); + }) + .doOnNext(updatedDefaults -> { + System.out.println("Updated defaults verified successfully."); + System.out.println("Updated model deployments: " + updatedDefaults.getModelDeployments()); + System.out.println("\nConfiguration management completed."); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(10); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } + + /** + * Gets an environment variable value or returns a default value if not set. 
+ * + * @param envVar the environment variable name + * @param defaultValue the default value to return if the environment variable is not set + * @return the environment variable value or the default value + */ + private static String getEnvOrDefault(String envVar, String defaultValue) { + String value = System.getenv(envVar); + return (value != null && !value.trim().isEmpty()) ? value : defaultValue; + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinary.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinary.java new file mode 100644 index 000000000000..15bba308d431 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinary.java @@ -0,0 +1,130 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * Sample demonstrating how to analyze binary documents using Content Understanding service. + * This sample shows: + * 1. Loading a binary file (PDF) + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) + */ +public class Sample01_AnalyzeBinary { + + public static void main(String[] args) throws IOException { + // BEGIN: com.azure.ai.contentunderstanding.sample01.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample01.buildClient + + // Load the sample file + String filePath = "src/samples/resources/sample_invoice.pdf"; + Path path = Paths.get(filePath); + byte[] fileBytes = Files.readAllBytes(path); + BinaryData binaryData = BinaryData.fromBytes(fileBytes); + + // BEGIN:ContentUnderstandingAnalyzeBinary + // Use the simplified beginAnalyzeBinary overload - contentType defaults to "application/octet-stream" + // For PDFs, you can also explicitly specify "application/pdf" using the full method signature + SyncPoller operation + = client.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeBinary + + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? result.getContents().size() : 0) + " content(s)"); + + // BEGIN:ContentUnderstandingExtractMarkdown + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + // END:ContentUnderstandingExtractMarkdown + + if (content != null && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } + + // BEGIN:ContentUnderstandingAccessDocumentProperties + // Check if this is document content to access document-specific properties + if (content instanceof 
DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + // END:ContentUnderstandingAccessDocumentProperties + + System.out.println("\nBinary document analysis completed successfully"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinaryAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinaryAsync.java new file mode 100644 index 
000000000000..96bb5f3baf74 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample01_AnalyzeBinaryAsync.java @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrating how to analyze binary documents using Content Understanding service. + * This sample shows: + * 1. Loading a binary file (PDF) + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ +public class Sample01_AnalyzeBinaryAsync { + + public static void main(String[] args) throws IOException { + // BEGIN: com.azure.ai.contentunderstanding.sample01Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample01Async.buildClient + + // Load the sample file + String filePath = "src/samples/resources/sample_invoice.pdf"; + Path path = Paths.get(filePath); + byte[] fileBytes = Files.readAllBytes(path); + BinaryData binaryData = BinaryData.fromBytes(fileBytes); + + // BEGIN:ContentUnderstandingAnalyzeBinaryAsyncAsync + // Use the simplified beginAnalyzeBinary overload - contentType defaults to "application/octet-stream" + // For PDFs, you can also explicitly specify "application/pdf" using the full method signature + PollerFlux operation + = client.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + + 
(result.getContents() != null ? result.getContents().size() : 0) + " content(s)"); + + // BEGIN:ContentUnderstandingExtractMarkdownAsync + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + // END:ContentUnderstandingExtractMarkdownAsync + + if (content != null && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println("Markdown content extracted successfully (" + + content.getMarkdown().length() + " characters)"); + } + + // BEGIN:ContentUnderstandingAccessDocumentPropertiesAsync + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println("Total pages: " + + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit + = documentContent.getUnit() != null ? 
documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + // END:ContentUnderstandingAccessDocumentPropertiesAsync + + System.out.println("\nBinary document analysis completed successfully"); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + // END:ContentUnderstandingAnalyzeBinaryAsyncAsync + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. 
+ try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrl.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrl.java new file mode 100644 index 000000000000..a6edc4a44254 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrl.java @@ -0,0 +1,284 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.AudioVisualContent; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.TranscriptPhrase; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.Arrays; +import java.util.List; + +/** + * Sample demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. 
Providing a URL to a document + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) + * + * Additional samples demonstrate analyzing different media types: + * - {@link #analyzeVideoUrl()} - Analyze video files + * - {@link #analyzeAudioUrl()} - Analyze audio files + * - {@link #analyzeImageUrl()} - Analyze image files + */ +public class Sample02_AnalyzeUrl { + + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample02.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample02.buildClient + + System.out.println("--- Document Analysis Example ---"); + analyzeDocumentUrl(client); + + System.out.println("\n--- Video Analysis Example ---"); + analyzeVideoUrl(client); + + System.out.println("\n--- Audio Analysis Example ---"); + analyzeAudioUrl(client); + + System.out.println("\n--- Image Analysis Example ---"); + analyzeImageUrl(client); + } + + /** + * Sample demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a document + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ + public static void analyzeDocumentUrl(ContentUnderstandingClient client) { + // BEGIN:ContentUnderstandingAnalyzeUrl + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation + = client.beginAnalyze("prebuilt-documentSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeUrl + + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? result.getContents().size() : 0) + " content(s)"); + + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + + if (content != null && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } + + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? 
documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + + System.out.println("\nURL document analysis completed successfully"); + } + + /** + * Sample demonstrating how to analyze video from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a video file + * 2. Analyzing the video with prebuilt-videoSearch analyzer + * 3. Iterating through video segments + * 4. 
Accessing audio/visual properties (timing, summary, frame size) + */ + public static void analyzeVideoUrl(ContentUnderstandingClient client) { + // BEGIN:ContentUnderstandingAnalyzeVideoUrl + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/videos/sdk_samples/FlightSimulator.mp4"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = client.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + // prebuilt-videoSearch can detect video segments, so we should iterate through all segments + int segmentIndex = 1; + for (MediaContent media : result.getContents()) { + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent videoContent = (AudioVisualContent) media; + System.out.println("--- Segment " + segmentIndex + " ---"); + System.out.println("Markdown:"); + System.out.println(videoContent.getMarkdown()); + + String summary = videoContent.getFields() != null && videoContent.getFields().containsKey("Summary") + ? (videoContent.getFields().get("Summary").getValue() != null + ? videoContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + System.out.println("Start: " + videoContent.getStartTimeMs() + " ms, End: " + videoContent.getEndTimeMs() + " ms"); + System.out.println("Frame size: " + videoContent.getWidth() + " x " + videoContent.getHeight()); + + System.out.println("---------------------"); + segmentIndex++; + } + // END:ContentUnderstandingAnalyzeVideoUrl + } + + /** + * Sample demonstrating how to analyze audio from URL using Content Understanding service. + * This sample shows: + * 1. 
Providing a URL to an audio file + * 2. Analyzing the audio with prebuilt-audioSearch analyzer + * 3. Accessing audio/visual properties (timing, summary, transcript) + */ + public static void analyzeAudioUrl(ContentUnderstandingClient client) { + // BEGIN:ContentUnderstandingAnalyzeAudioUrl + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/audio/callCenterRecording.mp3"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = client.beginAnalyze("prebuilt-audioSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent audioContent = (AudioVisualContent) result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(audioContent.getMarkdown()); + + String summary = audioContent.getFields() != null && audioContent.getFields().containsKey("Summary") + ? (audioContent.getFields().get("Summary").getValue() != null + ? 
audioContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + // Example: Access an additional field in AudioVisualContent (transcript phrases) + List transcriptPhrases = audioContent.getTranscriptPhrases(); + if (transcriptPhrases != null && !transcriptPhrases.isEmpty()) { + System.out.println("Transcript (first two phrases):"); + int count = 0; + for (TranscriptPhrase phrase : transcriptPhrases) { + if (count >= 2) { + break; + } + System.out.println(" [" + phrase.getSpeaker() + "] " + phrase.getStartTimeMs() + " ms: " + phrase.getText()); + count++; + } + } + // END:ContentUnderstandingAnalyzeAudioUrl + } + + /** + * Sample demonstrating how to analyze image from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to an image file + * 2. Analyzing the image with prebuilt-imageSearch analyzer + * 3. Accessing image properties (markdown, summary) + */ + public static void analyzeImageUrl(ContentUnderstandingClient client) { + // BEGIN:ContentUnderstandingAnalyzeImageUrl + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/image/pieChart.jpg"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = client.beginAnalyze("prebuilt-imageSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + MediaContent content = result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(content.getMarkdown()); + + String summary = content.getFields() != null && content.getFields().containsKey("Summary") + ? (content.getFields().get("Summary").getValue() != null + ? 
content.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + // END:ContentUnderstandingAnalyzeImageUrl + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrlAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrlAsync.java new file mode 100644 index 000000000000..91acc5e84176 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample02_AnalyzeUrlAsync.java @@ -0,0 +1,419 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.AudioVisualContent; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.TranscriptPhrase; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +/** + * Sample 
demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a document + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) + */ +public class Sample02_AnalyzeUrlAsync { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample02Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample02Async.buildClient + + System.out.println("--- Document Analysis Example ---"); + analyzeDocumentUrl(client); + + System.out.println("\n--- Video Analysis Example ---"); + analyzeVideoUrl(client); + + System.out.println("\n--- Audio Analysis Example ---"); + analyzeAudioUrl(client); + + System.out.println("\n--- Image Analysis Example ---"); + analyzeImageUrl(client); + } + + /** + * Sample demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a document + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ + public static void analyzeDocumentUrl(ContentUnderstandingAsyncClient client) { + // BEGIN:ContentUnderstandingAnalyzeUrlAsyncAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation + = client.beginAnalyze("prebuilt-documentSearch", Arrays.asList(input)); + + CountDownLatch latch = new CountDownLatch(1); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? 
result.getContents().size() : 0) + " content(s)"); + + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + + if (content != null && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println("Markdown content extracted successfully (" + + content.getMarkdown().length() + " characters)"); + } + + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println("Total pages: " + + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit + = documentContent.getUnit() != null ? 
documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + + System.out.println("\nURL document analysis completed successfully"); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + latch.countDown(); + }, + error -> { + // Error already handled in doOnError + latch.countDown(); + } + ); + + // Wait for the async operation to complete before returning + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Interrupted while waiting for document analysis", e); + } + // END:ContentUnderstandingAnalyzeUrlAsyncAsync + } + + /** + * Sample demonstrating how to analyze video from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a video file + * 2. Analyzing the video with prebuilt-videoSearch analyzer + * 3. Iterating through video segments + * 4. 
Accessing audio/visual properties (timing, summary, frame size) + */ + public static void analyzeVideoUrl(ContentUnderstandingAsyncClient client) { + // BEGIN:ContentUnderstandingAnalyzeVideoUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/videos/sdk_samples/FlightSimulator.mp4"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = client.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input)); + + CountDownLatch latch = new CountDownLatch(1); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + // prebuilt-videoSearch can detect video segments, so we should iterate through all segments + int segmentIndex = 1; + for (MediaContent media : result.getContents()) { + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent videoContent = (AudioVisualContent) media; + System.out.println("--- Segment " + segmentIndex + " ---"); + System.out.println("Markdown:"); + System.out.println(videoContent.getMarkdown()); + + String summary = videoContent.getFields() != null && videoContent.getFields().containsKey("Summary") + ? (videoContent.getFields().get("Summary").getValue() != null + ? 
videoContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + System.out.println("Start: " + videoContent.getStartTimeMs() + " ms, End: " + + videoContent.getEndTimeMs() + " ms"); + System.out.println("Frame size: " + videoContent.getWidth() + " x " + videoContent.getHeight()); + + System.out.println("---------------------"); + segmentIndex++; + } + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + latch.countDown(); + }, + error -> { + // Error already handled in doOnError + latch.countDown(); + } + ); + + // Wait for the async operation to complete before returning + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Interrupted while waiting for video analysis", e); + } + // END:ContentUnderstandingAnalyzeVideoUrlAsyncAsync + } + + /** + * Sample demonstrating how to analyze audio from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to an audio file + * 2. Analyzing the audio with prebuilt-audioSearch analyzer + * 3. 
Accessing audio/visual properties (timing, summary, transcript) + */ + public static void analyzeAudioUrl(ContentUnderstandingAsyncClient client) { + // BEGIN:ContentUnderstandingAnalyzeAudioUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/audio/callCenterRecording.mp3"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = client.beginAnalyze("prebuilt-audioSearch", Arrays.asList(input)); + + CountDownLatch latch = new CountDownLatch(1); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent audioContent = (AudioVisualContent) result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(audioContent.getMarkdown()); + + String summary = audioContent.getFields() != null && audioContent.getFields().containsKey("Summary") + ? (audioContent.getFields().get("Summary").getValue() != null + ? 
audioContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + // Example: Access an additional field in AudioVisualContent (transcript phrases) + List transcriptPhrases = audioContent.getTranscriptPhrases(); + if (transcriptPhrases != null && !transcriptPhrases.isEmpty()) { + System.out.println("Transcript (first two phrases):"); + int count = 0; + for (TranscriptPhrase phrase : transcriptPhrases) { + if (count >= 2) { + break; + } + System.out.println(" [" + phrase.getSpeaker() + "] " + phrase.getStartTimeMs() + " ms: " + + phrase.getText()); + count++; + } + } + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + latch.countDown(); + }, + error -> { + // Error already handled in doOnError + latch.countDown(); + } + ); + + // Wait for the async operation to complete before returning + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Interrupted while waiting for audio analysis", e); + } + // END:ContentUnderstandingAnalyzeAudioUrlAsyncAsync + } + + /** + * Sample demonstrating how to analyze image from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to an image file + * 2. Analyzing the image with prebuilt-imageSearch analyzer + * 3. 
Accessing image properties (markdown, summary) + */ + public static void analyzeImageUrl(ContentUnderstandingAsyncClient client) { + // BEGIN:ContentUnderstandingAnalyzeImageUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/image/pieChart.jpg"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = client.beginAnalyze("prebuilt-imageSearch", Arrays.asList(input)); + + CountDownLatch latch = new CountDownLatch(1); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + MediaContent content = result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(content.getMarkdown()); + + String summary = content.getFields() != null && content.getFields().containsKey("Summary") + ? (content.getFields().get("Summary").getValue() != null + ? 
content.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + latch.countDown(); + }, + error -> { + // Error already handled in doOnError + latch.countDown(); + } + ); + + // Wait for the async operation to complete before returning + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Interrupted while waiting for image analysis", e); + } + // END:ContentUnderstandingAnalyzeImageUrlAsyncAsync + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoice.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoice.java new file mode 100644 index 000000000000..a37e7b241c41 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoice.java @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ArrayField; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.ObjectField; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.Arrays; +import java.util.List; + +/** + * Sample demonstrating how to analyze invoices using Content Understanding service. + * This sample shows: + * 1. Analyzing an invoice document + * 2. Extracting structured invoice fields + * 3. Accessing nested object fields (TotalAmount) + * 4. Accessing array fields (LineItems) + * 5. 
Working with field confidence and source information + */ +public class Sample03_AnalyzeInvoice { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample03.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample03.buildClient + + // BEGIN:ContentUnderstandingAnalyzeInvoice + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String invoiceUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(invoiceUrl); + + SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeInvoice + + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // BEGIN:ContentUnderstandingExtractInvoiceFields + // Get the document content (invoices are documents) + MediaContent firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) firstContent; + + // Print document unit information + System.out.println("Document 
unit: " + + (documentContent.getUnit() != null ? documentContent.getUnit().toString() : "unknown")); + System.out.println( + "Pages: " + documentContent.getStartPageNumber() + " to " + documentContent.getEndPageNumber()); + System.out.println(); + + // Extract simple string fields using getValue() convenience method + // getValue() returns the typed value regardless of field type (StringField, NumberField, DateField, etc.) + ContentField customerNameField + = documentContent.getFields() != null ? documentContent.getFields().get("CustomerName") : null; + ContentField invoiceDateField + = documentContent.getFields() != null ? documentContent.getFields().get("InvoiceDate") : null; + + // Use getValue() instead of casting to specific types + // Note: getValue() returns the actual typed value - String, Number, LocalDate, etc. + String customerName = customerNameField != null ? (String) customerNameField.getValue() : null; + // InvoiceDate is a DateField, so getValue() returns LocalDate - convert to String for display + Object invoiceDateValue = invoiceDateField != null ? invoiceDateField.getValue() : null; + String invoiceDate = invoiceDateValue != null ? invoiceDateValue.toString() : null; + + System.out.println("Customer Name: " + (customerName != null ? customerName : "(None)")); + if (customerNameField != null) { + System.out.println(" Confidence: " + (customerNameField.getConfidence() != null + ? String.format("%.2f", customerNameField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (customerNameField.getSource() != null ? customerNameField.getSource() : "N/A")); + List spans = customerNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out + .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + System.out.println("Invoice Date: " + (invoiceDate != null ? 
invoiceDate : "(None)")); + if (invoiceDateField != null) { + System.out.println(" Confidence: " + (invoiceDateField.getConfidence() != null + ? String.format("%.2f", invoiceDateField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (invoiceDateField.getSource() != null ? invoiceDateField.getSource() : "N/A")); + List spans = invoiceDateField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out + .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Extract object fields (nested structures) using getFieldOrDefault() convenience method + // getFieldOrDefault() returns null if the field doesn't exist (safe access pattern) + ContentField totalAmountField + = documentContent.getFields() != null ? documentContent.getFields().get("TotalAmount") : null; + if (totalAmountField instanceof ObjectField) { + ObjectField totalAmountObj = (ObjectField) totalAmountField; + + // Use getFieldOrDefault() for safe nested field access + ContentField amountField = totalAmountObj.getFieldOrDefault("Amount"); + ContentField currencyField = totalAmountObj.getFieldOrDefault("CurrencyCode"); + + // Use getValue() instead of type-specific getters + Double amount = amountField != null ? (Double) amountField.getValue() : null; + String currency = currencyField != null ? (String) currencyField.getValue() : null; + + System.out.println("Total: " + (currency != null ? currency : "") + + (amount != null ? 
String.format("%.2f", amount) : "(None)")); + if (totalAmountObj.getConfidence() != null) { + System.out.println(" Confidence: " + String.format("%.2f", totalAmountObj.getConfidence())); + } + if (totalAmountObj.getSource() != null && !totalAmountObj.getSource().isEmpty()) { + System.out.println(" Source: " + totalAmountObj.getSource()); + } + } + + // Extract array fields using size() and get() convenience methods + // size() returns the number of elements, get(index) returns the element at the index + ContentField lineItemsField + = documentContent.getFields() != null ? documentContent.getFields().get("LineItems") : null; + if (lineItemsField instanceof ArrayField) { + ArrayField lineItems = (ArrayField) lineItemsField; + + // Use size() instead of getValueArray().size() + System.out.println("Line Items (" + lineItems.size() + "):"); + + // Use get(i) instead of getValueArray().get(i) + for (int i = 0; i < lineItems.size(); i++) { + ContentField itemField = lineItems.get(i); + if (itemField instanceof ObjectField) { + ObjectField item = (ObjectField) itemField; + + // Use getFieldOrDefault() and getValue() for cleaner access + ContentField descField = item.getFieldOrDefault("Description"); + ContentField qtyField = item.getFieldOrDefault("Quantity"); + + String description = descField != null ? (String) descField.getValue() : null; + Double quantity = qtyField != null ? (Double) qtyField.getValue() : null; + + System.out.println(" Item " + (i + 1) + ": " + (description != null ? description : "N/A")); + System.out.println(" Quantity: " + (quantity != null ? 
quantity : "N/A")); + if (qtyField != null && qtyField.getConfidence() != null) { + System.out.println(" Quantity Confidence: " + String.format("%.2f", qtyField.getConfidence())); + } else { + System.out.println(" Quantity Confidence: N/A"); + } + } + } + } + } + // END:ContentUnderstandingExtractInvoiceFields + + System.out.println("\nInvoice analysis completed successfully"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoiceAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoiceAsync.java new file mode 100644 index 000000000000..2fac259c4c74 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample03_AnalyzeInvoiceAsync.java @@ -0,0 +1,226 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ArrayField; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.ObjectField; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrating how to analyze invoices using Content Understanding service. + * This sample shows: + * 1. Analyzing an invoice document + * 2. Extracting structured invoice fields + * 3. Accessing nested object fields (TotalAmount) + * 4. Accessing array fields (LineItems) + * 5. 
Working with field confidence and source information + */ +public class Sample03_AnalyzeInvoiceAsync { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample03Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample03Async.buildClient + + // BEGIN:ContentUnderstandingAnalyzeInvoiceAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String invoiceUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(invoiceUrl); + + PollerFlux operation + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + System.out.println("Analysis operation completed"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // BEGIN:ContentUnderstandingExtractInvoiceFieldsAsync + 
// Get the document content (invoices are documents) + MediaContent firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) firstContent; + + // Print document unit information + System.out.println("Document unit: " + + (documentContent.getUnit() != null ? documentContent.getUnit().toString() : "unknown")); + System.out.println("Pages: " + documentContent.getStartPageNumber() + " to " + + documentContent.getEndPageNumber()); + System.out.println(); + + // Extract simple string fields using getValue() convenience method + // getValue() returns the typed value regardless of field type (StringField, NumberField, DateField, etc.) + ContentField customerNameField + = documentContent.getFields() != null ? documentContent.getFields().get("CustomerName") : null; + ContentField invoiceDateField + = documentContent.getFields() != null ? documentContent.getFields().get("InvoiceDate") : null; + + // Use getValue() instead of casting to specific types + // Note: getValue() returns the actual typed value - String, Number, LocalDate, etc. + String customerName = customerNameField != null ? (String) customerNameField.getValue() : null; + // InvoiceDate is a DateField, so getValue() returns LocalDate - convert to String for display + Object invoiceDateValue = invoiceDateField != null ? invoiceDateField.getValue() : null; + String invoiceDate = invoiceDateValue != null ? invoiceDateValue.toString() : null; + + System.out.println("Customer Name: " + (customerName != null ? customerName : "(None)")); + if (customerNameField != null) { + System.out.println(" Confidence: " + (customerNameField.getConfidence() != null + ? String.format("%.2f", customerNameField.getConfidence()) + : "N/A")); + System.out.println(" Source: " + + (customerNameField.getSource() != null ? 
customerNameField.getSource() : "N/A")); + List<ContentSpan> spans = customerNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println(" Position in markdown: offset=" + span.getOffset() + ", length=" + + span.getLength()); + } + } + + System.out.println("Invoice Date: " + (invoiceDate != null ? invoiceDate : "(None)")); + if (invoiceDateField != null) { + System.out.println(" Confidence: " + (invoiceDateField.getConfidence() != null + ? String.format("%.2f", invoiceDateField.getConfidence()) + : "N/A")); + System.out.println(" Source: " + + (invoiceDateField.getSource() != null ? invoiceDateField.getSource() : "N/A")); + List<ContentSpan> spans = invoiceDateField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println(" Position in markdown: offset=" + span.getOffset() + ", length=" + + span.getLength()); + } + } + + // Extract object fields (nested structures) using getFieldOrDefault() convenience method + // getFieldOrDefault() returns null if the field doesn't exist (safe access pattern) + ContentField totalAmountField + = documentContent.getFields() != null ? documentContent.getFields().get("TotalAmount") : null; + if (totalAmountField instanceof ObjectField) { + ObjectField totalAmountObj = (ObjectField) totalAmountField; + + // Use getFieldOrDefault() for safe nested field access + ContentField amountField = totalAmountObj.getFieldOrDefault("Amount"); + ContentField currencyField = totalAmountObj.getFieldOrDefault("CurrencyCode"); + + // Use getValue() instead of type-specific getters + Double amount = amountField != null ? (Double) amountField.getValue() : null; + String currency = currencyField != null ? (String) currencyField.getValue() : null; + + System.out.println("Total: " + (currency != null ? currency : "") + + (amount != null ? 
String.format("%.2f", amount) : "(None)")); + if (totalAmountObj.getConfidence() != null) { + System.out.println(" Confidence: " + String.format("%.2f", totalAmountObj.getConfidence())); + } + if (totalAmountObj.getSource() != null && !totalAmountObj.getSource().isEmpty()) { + System.out.println(" Source: " + totalAmountObj.getSource()); + } + } + + // Extract array fields using size() and get() convenience methods + // size() returns the number of elements, get(index) returns the element at the index + ContentField lineItemsField + = documentContent.getFields() != null ? documentContent.getFields().get("LineItems") : null; + if (lineItemsField instanceof ArrayField) { + ArrayField lineItems = (ArrayField) lineItemsField; + + // Use size() instead of getValueArray().size() + System.out.println("Line Items (" + lineItems.size() + "):"); + + // Use get(i) instead of getValueArray().get(i) + for (int i = 0; i < lineItems.size(); i++) { + ContentField itemField = lineItems.get(i); + if (itemField instanceof ObjectField) { + ObjectField item = (ObjectField) itemField; + + // Use getFieldOrDefault() and getValue() for cleaner access + ContentField descField = item.getFieldOrDefault("Description"); + ContentField qtyField = item.getFieldOrDefault("Quantity"); + + String description = descField != null ? (String) descField.getValue() : null; + Double quantity = qtyField != null ? (Double) qtyField.getValue() : null; + + System.out.println(" Item " + (i + 1) + ": " + (description != null ? description : "N/A")); + System.out.println(" Quantity: " + (quantity != null ? 
quantity : "N/A")); + if (qtyField != null && qtyField.getConfidence() != null) { + System.out.println(" Quantity Confidence: " + + String.format("%.2f", qtyField.getConfidence())); + } else { + System.out.println(" Quantity Confidence: N/A"); + } + } + } + } + } + // END:ContentUnderstandingExtractInvoiceFieldsAsync + + System.out.println("\nInvoice analysis completed successfully"); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + // END:ContentUnderstandingAnalyzeInvoiceAsync + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzer.java new file mode 100644 index 000000000000..9e99dc6fc6fe --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzer.java @@ -0,0 +1,251 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.NumberField; +import com.azure.ai.contentunderstanding.models.StringField; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Sample demonstrating how to create a custom analyzer with field schema. + * This sample shows: + * 1. Defining a field schema with custom fields + * 2. Demonstrating three extraction methods: Extract, Generate, Classify + * 3. Creating a custom analyzer with configuration + * 4. 
 Using the custom analyzer to analyze documents + */ +public class Sample04_CreateAnalyzer { + + private static String createdAnalyzerId; + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample04.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample04.buildClient + + // BEGIN:ContentUnderstandingCreateAnalyzer + // Generate a unique analyzer ID + String analyzerId = "my_custom_analyzer_" + System.currentTimeMillis(); + + System.out.println("Creating custom analyzer '" + analyzerId + "'..."); + + // Define field schema with custom fields + // This example demonstrates three extraction methods: + // - extract: Literal text extraction (requires estimateSourceAndConfidence) + // - generate: AI-generated values based on content interpretation + // - classify: Classification against predefined categories + Map<String, ContentFieldDefinition> fields = new HashMap<>(); + + ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + 
totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + // Create the custom analyzer with configuration + Map<String, String> models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer() + .setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom analyzer for extracting company information") + .setConfig(new ContentAnalyzerConfig() + .setEnableOcr(true) + .setEnableLayout(true) + .setEnableFormula(true) + .setEstimateFieldSourceAndConfidence(true) + .setReturnDetails(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + // Create the analyzer + SyncPoller<ContentAnalyzerOperationStatus, ContentAnalyzer> operation + = client.beginCreateAnalyzer(analyzerId, customAnalyzer, true); + + ContentAnalyzer result = operation.getFinalResult(); + System.out.println("Analyzer '" + analyzerId + "' created successfully!"); + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + System.out.println(" 
Description: " + result.getDescription()); + } + + if (result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) { + System.out.println(" Fields (" + result.getFieldSchema().getFields().size() + "):"); + result.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + String method = fieldDef.getMethod() != null ? fieldDef.getMethod().toString() : "auto"; + String type = fieldDef.getType() != null ? fieldDef.getType().toString() : "unknown"; + System.out.println(" - " + fieldName + ": " + type + " (" + method + ")"); + }); + } + // END:ContentUnderstandingCreateAnalyzer + + createdAnalyzerId = analyzerId; // Track for later use + + // Now use the custom analyzer to analyze a document + System.out.println("\nUsing the custom analyzer to analyze a document..."); + + // BEGIN:ContentUnderstandingUseCustomAnalyzer + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String documentUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + // Analyze a document using the custom analyzer + SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> analyzeOperation + = client.beginAnalyze(analyzerId, Arrays.asList(input)); + + AnalyzeResult analyzeResult = analyzeOperation.getFinalResult(); + + // Extract custom fields from the result + // Since EstimateFieldSourceAndConfidence is enabled, we can access confidence scores and source information + if (analyzeResult.getContents() != null + && !analyzeResult.getContents().isEmpty() + && analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) analyzeResult.getContents().get(0); + + // Extract field (literal text extraction) + ContentField companyNameField + = content.getFields() != null ? 
content.getFields().get("company_name") : null; + if (companyNameField instanceof StringField) { + StringField sf = (StringField) companyNameField; + String companyName = sf.getValueString(); + System.out + .println("Company Name (extract): " + (companyName != null ? companyName : "(not found)")); + System.out.println(" Confidence: " + (companyNameField.getConfidence() != null + ? String.format("%.2f", companyNameField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (companyNameField.getSource() != null ? companyNameField.getSource() : "N/A")); + List<ContentSpan> spans = companyNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Extract field (literal text extraction) + ContentField totalAmountField + = content.getFields() != null ? content.getFields().get("total_amount") : null; + if (totalAmountField instanceof NumberField) { + NumberField nf = (NumberField) totalAmountField; + Double totalAmount = nf.getValueNumber(); + System.out.println("Total Amount (extract): " + + (totalAmount != null ? String.format("%.2f", totalAmount) : "(not found)")); + System.out.println(" Confidence: " + (totalAmountField.getConfidence() != null + ? String.format("%.2f", totalAmountField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (totalAmountField.getSource() != null ? totalAmountField.getSource() : "N/A")); + List<ContentSpan> spans = totalAmountField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Generate field (AI-generated value) + ContentField summaryField + = content.getFields() != null ? 
content.getFields().get("document_summary") : null; + if (summaryField instanceof StringField) { + StringField sf = (StringField) summaryField; + String summary = sf.getValueString(); + System.out.println("Document Summary (generate): " + (summary != null ? summary : "(not found)")); + System.out.println(" Confidence: " + (summaryField.getConfidence() != null + ? String.format("%.2f", summaryField.getConfidence()) + : "N/A")); + // Note: Generated fields may not have source information + if (summaryField.getSource() != null && !summaryField.getSource().isEmpty()) { + System.out.println(" Source: " + summaryField.getSource()); + } + } + + // Classify field (classification against predefined categories) + ContentField documentTypeField + = content.getFields() != null ? content.getFields().get("document_type") : null; + if (documentTypeField instanceof StringField) { + StringField sf = (StringField) documentTypeField; + String documentType = sf.getValueString(); + System.out + .println("Document Type (classify): " + (documentType != null ? documentType : "(not found)")); + System.out.println(" Confidence: " + (documentTypeField.getConfidence() != null + ? 
String.format("%.2f", documentTypeField.getConfidence()) + : "N/A")); + // Note: Classified fields may not have source information + if (documentTypeField.getSource() != null && !documentTypeField.getSource().isEmpty()) { + System.out.println(" Source: " + documentTypeField.getSource()); + } + } + } + // END:ContentUnderstandingUseCustomAnalyzer + + // Cleanup - delete the created analyzer + System.out.println("\nCleaning up: deleting analyzer '" + createdAnalyzerId + "'..."); + client.deleteAnalyzer(createdAnalyzerId); + System.out.println("Analyzer '" + createdAnalyzerId + "' deleted successfully."); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzerAsync.java new file mode 100644 index 000000000000..699f9f0bff54 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample04_CreateAnalyzerAsync.java @@ -0,0 +1,300 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.NumberField; +import com.azure.ai.contentunderstanding.models.StringField; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrating how to create a custom analyzer with field schema. + * This sample shows: + * 1. Defining a field schema with custom fields + * 2. Demonstrating three extraction methods: Extract, Generate, Classify + * 3. Creating a custom analyzer with configuration + * 4. 
 Using the custom analyzer to analyze documents + */ +public class Sample04_CreateAnalyzerAsync { + + private static String createdAnalyzerId; + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample04Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample04Async.buildClient + + // BEGIN:ContentUnderstandingCreateAnalyzerAsync + // Generate a unique analyzer ID + String analyzerId = "my_custom_analyzer_" + System.currentTimeMillis(); + + System.out.println("Creating custom analyzer '" + analyzerId + "'..."); + + // Define field schema with custom fields + // This example demonstrates three extraction methods: + // - extract: Literal text extraction (requires estimateSourceAndConfidence) + // - generate: AI-generated values based on content interpretation + // - classify: Classification against predefined categories + Map<String, ContentFieldDefinition> fields = new HashMap<>(); + + ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + 
totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + // Create the custom analyzer with configuration + Map<String, String> models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer() + .setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom analyzer for extracting company information") + .setConfig(new ContentAnalyzerConfig() + .setEnableOcr(true) + .setEnableLayout(true) + .setEnableFormula(true) + .setEstimateFieldSourceAndConfidence(true) + .setReturnDetails(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + // Create the analyzer + PollerFlux<ContentAnalyzerOperationStatus, ContentAnalyzer> operation + = client.beginCreateAnalyzer(analyzerId, customAnalyzer, true); + + String finalAnalyzerId = analyzerId; // For use in lambda + operation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return 
pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(result -> { + System.out.println("Analyzer '" + finalAnalyzerId + "' created successfully!"); + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + result.getDescription()); + } + + if (result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) { + System.out.println(" Fields (" + result.getFieldSchema().getFields().size() + "):"); + result.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + String method = fieldDef.getMethod() != null ? fieldDef.getMethod().toString() : "auto"; + String type = fieldDef.getType() != null ? fieldDef.getType().toString() : "unknown"; + System.out.println(" - " + fieldName + ": " + type + " (" + method + ")"); + }); + } + }) + .then(Mono.fromRunnable(() -> { + // Now use the custom analyzer to analyze a document + System.out.println("\nUsing the custom analyzer to analyze a document..."); + })) + .then(Mono.fromCallable(() -> { + // BEGIN:ContentUnderstandingUseCustomAnalyzerAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String documentUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + return input; + })) + .flatMap(input -> { + // Analyze a document using the custom analyzer + PollerFlux<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> analyzeOperation + = client.beginAnalyze(finalAnalyzerId, Arrays.asList(input)); + + return analyzeOperation.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Analysis polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + 
"Analysis polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }); + }) + .doOnNext(analyzeResult -> { + // Extract custom fields from the result + // Since EstimateFieldSourceAndConfidence is enabled, we can access confidence scores and source information + if (analyzeResult.getContents() != null + && !analyzeResult.getContents().isEmpty() + && analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) analyzeResult.getContents().get(0); + + // Extract field (literal text extraction) + ContentField companyNameField + = content.getFields() != null ? content.getFields().get("company_name") : null; + if (companyNameField instanceof StringField) { + StringField sf = (StringField) companyNameField; + String companyName = sf.getValueString(); + System.out.println("Company Name (extract): " + (companyName != null ? companyName : "(not found)")); + System.out.println(" Confidence: " + (companyNameField.getConfidence() != null + ? String.format("%.2f", companyNameField.getConfidence()) + : "N/A")); + System.out.println(" Source: " + + (companyNameField.getSource() != null ? companyNameField.getSource() : "N/A")); + List<ContentSpan> spans = companyNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println(" Position in markdown: offset=" + span.getOffset() + ", length=" + + span.getLength()); + } + } + + // Extract field (literal text extraction) + ContentField totalAmountField + = content.getFields() != null ? content.getFields().get("total_amount") : null; + if (totalAmountField instanceof NumberField) { + NumberField nf = (NumberField) totalAmountField; + Double totalAmount = nf.getValueNumber(); + System.out.println("Total Amount (extract): " + + (totalAmount != null ? String.format("%.2f", totalAmount) : "(not found)")); + System.out.println(" Confidence: " + (totalAmountField.getConfidence() != null + ? 
String.format("%.2f", totalAmountField.getConfidence()) + : "N/A")); + System.out.println(" Source: " + + (totalAmountField.getSource() != null ? totalAmountField.getSource() : "N/A")); + List<ContentSpan> spans = totalAmountField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println(" Position in markdown: offset=" + span.getOffset() + ", length=" + + span.getLength()); + } + } + + // Generate field (AI-generated value) + ContentField summaryField + = content.getFields() != null ? content.getFields().get("document_summary") : null; + if (summaryField instanceof StringField) { + StringField sf = (StringField) summaryField; + String summary = sf.getValueString(); + System.out.println("Document Summary (generate): " + (summary != null ? summary : "(not found)")); + System.out.println(" Confidence: " + (summaryField.getConfidence() != null + ? String.format("%.2f", summaryField.getConfidence()) + : "N/A")); + // Note: Generated fields may not have source information + if (summaryField.getSource() != null && !summaryField.getSource().isEmpty()) { + System.out.println(" Source: " + summaryField.getSource()); + } + } + + // Classify field (classification against predefined categories) + ContentField documentTypeField + = content.getFields() != null ? content.getFields().get("document_type") : null; + if (documentTypeField instanceof StringField) { + StringField sf = (StringField) documentTypeField; + String documentType = sf.getValueString(); + System.out.println("Document Type (classify): " + (documentType != null ? documentType : "(not found)")); + System.out.println(" Confidence: " + (documentTypeField.getConfidence() != null + ? 
String.format("%.2f", documentTypeField.getConfidence()) + : "N/A")); + // Note: Classified fields may not have source information + if (documentTypeField.getSource() != null && !documentTypeField.getSource().isEmpty()) { + System.out.println(" Source: " + documentTypeField.getSource()); + } + } + } + // END:ContentUnderstandingUseCustomAnalyzerAsync + }) + .then(Mono.fromRunnable(() -> { + // Cleanup - delete the created analyzer + System.out.println("\nCleaning up: deleting analyzer '" + finalAnalyzerId + "'..."); + })) + .then(client.deleteAnalyzer(finalAnalyzerId)) + .doOnSuccess(v -> { + System.out.println("Analyzer '" + finalAnalyzerId + "' deleted successfully."); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + // END:ContentUnderstandingCreateAnalyzerAsync + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(60); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample05_CreateClassifier.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample05_CreateClassifier.java new file mode 100644 index 000000000000..c7e764dcad98 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample05_CreateClassifier.java @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus;
import com.azure.ai.contentunderstanding.models.ContentCategoryDefinition;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.polling.SyncPoller;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.util.HashMap;
import java.util.Map;

/**
 * Sample demonstrating how to create a classifier analyzer.
 *
 * This sample shows how to create a classifier that categorizes documents into predefined
 * custom categories using ContentCategories. Classifiers are useful for:
 * - Content organization: Organize large document collections by type through categorization
 * - Data routing (optional): Route data to specific custom analyzers based on category
 * - Multi-document processing: Process files containing multiple document types by automatically
 *   segmenting them
 *
 * Classifiers use custom categories defined in ContentCategories. Each category has a Description
 * that helps the AI model understand what documents belong to that category. You can define up to
 * 200 category names and descriptions. You can include an "other" category to handle unmatched
 * content; otherwise, all files are forced to be classified into one of your defined categories.
 *
 * The EnableSegment property in the analyzer configuration controls whether multi-document files
 * are split into segments:
 * - EnableSegment = false: Classifies the entire file as a single category (classify only)
 * - EnableSegment = true: Automatically splits the file into segments by category (classify and segment)
 */
public class Sample05_CreateClassifier {

    // Tracks the created analyzer ID so it can be deleted during cleanup.
    private static String createdAnalyzerId;

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample05.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample05.buildClient

        // BEGIN:ContentUnderstandingCreateClassifier
        // Generate a unique classifier analyzer ID
        String analyzerId = "document_classifier_" + System.currentTimeMillis();

        System.out.println("Creating classifier analyzer '" + analyzerId + "'...");

        // Define content categories for classification.
        // Each category has a description that helps the AI model understand what documents belong to it.
        // NOTE(review): the pasted source had lost its generic type parameters (raw Map/SyncPoller);
        // the parameterized types are restored here.
        Map<String, ContentCategoryDefinition> categories = new HashMap<>();

        categories.put("Loan_Application", new ContentCategoryDefinition()
            .setDescription("Documents submitted by individuals or businesses to request funding, "
                + "typically including personal or business details, financial history, loan amount, "
                + "purpose, and supporting documentation."));

        categories.put("Invoice", new ContentCategoryDefinition()
            .setDescription("Billing documents issued by sellers or service providers to request payment "
                + "for goods or services, detailing items, prices, taxes, totals, and payment terms."));

        categories.put("Bank_Statement", new ContentCategoryDefinition()
            .setDescription("Official statements issued by banks that summarize account activity over a period, "
                + "including deposits, withdrawals, fees, and balances."));

        // Create analyzer configuration with content categories
        ContentAnalyzerConfig config = new ContentAnalyzerConfig()
            .setReturnDetails(true)
            .setEnableSegment(true) // Enable automatic segmentation by category
            .setContentCategories(categories);

        // Create the classifier analyzer
        // Note: models are specified using model names, not deployment names
        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");

        ContentAnalyzer classifier = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Custom classifier for financial document categorization")
            .setConfig(config)
            .setModels(models);

        // Create the classifier; the long-running operation is polled synchronously to completion.
        SyncPoller<ContentAnalyzerOperationStatus, ContentAnalyzer> operation
            = client.beginCreateAnalyzer(analyzerId, classifier, true);

        ContentAnalyzer result = operation.getFinalResult();
        System.out.println("Classifier '" + analyzerId + "' created successfully!");

        if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) {
            System.out.println("  Description: " + result.getDescription());
        }

        if (result.getConfig() != null && result.getConfig().getContentCategories() != null) {
            System.out.println("  Categories (" + result.getConfig().getContentCategories().size() + "):");
            result.getConfig().getContentCategories().forEach((categoryName, categoryDef) -> {
                System.out.println("    - " + categoryName);
                if (categoryDef.getDescription() != null) {
                    // Truncate long descriptions for display
                    String desc = categoryDef.getDescription();
                    if (desc.length() > 60) {
                        desc = desc.substring(0, 57) + "...";
                    }
                    System.out.println("      Description: " + desc);
                }
            });
        }

        if (result.getConfig() != null && result.getConfig().isEnableSegment() != null) {
            System.out.println("  Segmentation enabled: " + result.getConfig().isEnableSegment());
        }
        // END:ContentUnderstandingCreateClassifier

        createdAnalyzerId = analyzerId; // Track for cleanup

        // Cleanup - delete the created classifier analyzer
        System.out.println("\nCleaning up: deleting classifier analyzer '" + createdAnalyzerId + "'...");
        client.deleteAnalyzer(createdAnalyzerId);
        System.out.println("Classifier analyzer '" + createdAnalyzerId + "' deleted successfully.");
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus;
import com.azure.ai.contentunderstanding.models.ContentCategoryDefinition;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrating how to create a classifier analyzer asynchronously.
 *
 * This sample shows how to create a classifier that categorizes documents into predefined
 * custom categories using ContentCategories. Classifiers are useful for:
 * - Content organization: Organize large document collections by type through categorization
 * - Data routing (optional): Route data to specific custom analyzers based on category
 * - Multi-document processing: Process files containing multiple document types by automatically
 *   segmenting them
 *
 * Classifiers use custom categories defined in ContentCategories. Each category has a Description
 * that helps the AI model understand what documents belong to that category. You can define up to
 * 200 category names and descriptions. You can include an "other" category to handle unmatched
 * content; otherwise, all files are forced to be classified into one of your defined categories.
 *
 * The EnableSegment property in the analyzer configuration controls whether multi-document files
 * are split into segments:
 * - EnableSegment = false: Classifies the entire file as a single category (classify only)
 * - EnableSegment = true: Automatically splits the file into segments by category (classify and segment)
 */
public class Sample05_CreateClassifierAsync {

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample05Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample05Async.buildClient

        // BEGIN:ContentUnderstandingCreateClassifierAsync
        // Generate a unique classifier analyzer ID
        String analyzerId = "document_classifier_" + System.currentTimeMillis();

        System.out.println("Creating classifier analyzer '" + analyzerId + "'...");

        // Define content categories for classification.
        // Each category has a description that helps the AI model understand what documents belong to it.
        // NOTE(review): the pasted source had lost its generic type parameters (raw Map/PollerFlux);
        // the parameterized types are restored here. The unused static createdAnalyzerId field
        // (never assigned in the async variant) was removed.
        Map<String, ContentCategoryDefinition> categories = new HashMap<>();

        categories.put("Loan_Application", new ContentCategoryDefinition()
            .setDescription("Documents submitted by individuals or businesses to request funding, "
                + "typically including personal or business details, financial history, loan amount, "
                + "purpose, and supporting documentation."));

        categories.put("Invoice", new ContentCategoryDefinition()
            .setDescription("Billing documents issued by sellers or service providers to request payment "
                + "for goods or services, detailing items, prices, taxes, totals, and payment terms."));

        categories.put("Bank_Statement", new ContentCategoryDefinition()
            .setDescription("Official statements issued by banks that summarize account activity over a period, "
                + "including deposits, withdrawals, fees, and balances."));

        // Create analyzer configuration with content categories
        ContentAnalyzerConfig config = new ContentAnalyzerConfig()
            .setReturnDetails(true)
            .setEnableSegment(true) // Enable automatic segmentation by category
            .setContentCategories(categories);

        // Create the classifier analyzer
        // Note: models are specified using model names, not deployment names
        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");

        ContentAnalyzer classifier = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Custom classifier for financial document categorization")
            .setConfig(config)
            .setModels(models);

        // Create the classifier (reactive long-running operation)
        PollerFlux<ContentAnalyzerOperationStatus, ContentAnalyzer> operation
            = client.beginCreateAnalyzer(analyzerId, classifier, true);

        String finalAnalyzerId = analyzerId; // For use in lambda
        operation.last()
            .flatMap(pollResponse -> {
                if (pollResponse.getStatus().isComplete()) {
                    System.out.println("Polling completed successfully");
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(result -> {
                System.out.println("Classifier '" + finalAnalyzerId + "' created successfully!");

                if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) {
                    System.out.println("  Description: " + result.getDescription());
                }

                if (result.getConfig() != null && result.getConfig().getContentCategories() != null) {
                    System.out.println("  Categories (" + result.getConfig().getContentCategories().size() + "):");
                    result.getConfig().getContentCategories().forEach((categoryName, categoryDef) -> {
                        System.out.println("    - " + categoryName);
                        if (categoryDef.getDescription() != null) {
                            // Truncate long descriptions for display
                            String desc = categoryDef.getDescription();
                            if (desc.length() > 60) {
                                desc = desc.substring(0, 57) + "...";
                            }
                            System.out.println("      Description: " + desc);
                        }
                    });
                }

                if (result.getConfig() != null && result.getConfig().isEnableSegment() != null) {
                    System.out.println("  Segmentation enabled: " + result.getConfig().isEnableSegment());
                }
            })
            .then(Mono.fromRunnable(() -> {
                // Cleanup - delete the created classifier analyzer
                System.out.println("\nCleaning up: deleting classifier analyzer '" + finalAnalyzerId + "'...");
            }))
            .then(client.deleteAnalyzer(finalAnalyzerId))
            .doOnSuccess(v -> {
                System.out.println("Classifier analyzer '" + finalAnalyzerId + "' deleted successfully.");
            })
            .doOnError(error -> {
                System.err.println("Error occurred: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );
        // END:ContentUnderstandingCreateClassifierAsync

        // The .subscribe() call is not blocking. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.SECONDS.sleep(30);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
Getting prebuilt analyzer information + */ +public class Sample06_GetAnalyzer { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample06.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) + client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample06.buildClient + + // BEGIN:ContentUnderstandingGetAnalyzer + // Get a prebuilt analyzer (these are always available) + String analyzerId = "prebuilt-invoice"; + + System.out.println("Retrieving analyzer '" + analyzerId + "'..."); + + ContentAnalyzer analyzer = client.getAnalyzer(analyzerId); + + System.out.println("Analyzer ID: " + analyzer.getAnalyzerId()); + System.out.println( + "Base Analyzer ID: " + (analyzer.getBaseAnalyzerId() != null ? analyzer.getBaseAnalyzerId() : "N/A")); + System.out.println("Description: " + (analyzer.getDescription() != null ? 
analyzer.getDescription() : "N/A")); + + // Display configuration + if (analyzer.getConfig() != null) { + System.out.println("\nAnalyzer Configuration:"); + System.out.println(" Enable OCR: " + analyzer.getConfig().isEnableOcr()); + System.out.println(" Enable Layout: " + analyzer.getConfig().isEnableLayout()); + System.out.println(" Enable Formula: " + analyzer.getConfig().isEnableFormula()); + System.out.println( + " Estimate Field Source and Confidence: " + analyzer.getConfig().isEstimateFieldSourceAndConfidence()); + System.out.println(" Return Details: " + analyzer.getConfig().isReturnDetails()); + } + + // Display field schema if available + if (analyzer.getFieldSchema() != null) { + System.out.println("\nField Schema:"); + System.out.println(" Name: " + analyzer.getFieldSchema().getName()); + System.out.println(" Description: " + (analyzer.getFieldSchema().getDescription() != null + ? analyzer.getFieldSchema().getDescription() + : "N/A")); + if (analyzer.getFieldSchema().getFields() != null) { + System.out.println(" Number of fields: " + analyzer.getFieldSchema().getFields().size()); + System.out.println(" Fields:"); + analyzer.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + System.out.println(" - " + fieldName + " (" + fieldDef.getType() + ", Method: " + + (fieldDef.getMethod() != null ? 
fieldDef.getMethod() : "N/A") + ")"); + if (fieldDef.getDescription() != null && !fieldDef.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + fieldDef.getDescription()); + } + }); + } + } + + // Display models if available + if (analyzer.getModels() != null && !analyzer.getModels().isEmpty()) { + System.out.println("\nModel Mappings:"); + analyzer.getModels().forEach((modelKey, modelValue) -> { + System.out.println(" " + modelKey + ": " + modelValue); + }); + } + + // Display status if available + if (analyzer.getStatus() != null) { + System.out.println("\nAnalyzer Status: " + analyzer.getStatus()); + } + + // Display created/updated timestamps if available + if (analyzer.getCreatedAt() != null) { + System.out.println("Created: " + analyzer.getCreatedAt()); + } + if (analyzer.getLastModifiedAt() != null) { + System.out.println("Updated: " + analyzer.getLastModifiedAt()); + } + // END:ContentUnderstandingGetAnalyzer + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample06_GetAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample06_GetAnalyzerAsync.java new file mode 100644 index 000000000000..c935f9ff94d1 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample06_GetAnalyzerAsync.java @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrating how to get analyzer information asynchronously.
 * This sample shows:
 * 1. Retrieving analyzer details by ID
 * 2. Accessing analyzer configuration
 * 3. Inspecting field schema definitions
 * 4. Getting prebuilt analyzer information
 */
public class Sample06_GetAnalyzerAsync {

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample06Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder clientBuilder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = clientBuilder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = clientBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample06Async.buildClient

        // BEGIN:ContentUnderstandingGetAnalyzerAsync
        // Get a prebuilt analyzer (these are always available)
        String analyzerId = "prebuilt-invoice";

        System.out.println("Retrieving analyzer '" + analyzerId + "'...");

        // The success and error handling are supplied directly to subscribe(); this is
        // behaviorally equivalent to a doOnNext/doOnError chain followed by an empty subscriber.
        client.getAnalyzer(analyzerId)
            .subscribe(
                analyzer -> {
                    String baseId = analyzer.getBaseAnalyzerId() != null ? analyzer.getBaseAnalyzerId() : "N/A";
                    String description = analyzer.getDescription() != null ? analyzer.getDescription() : "N/A";
                    System.out.println("Analyzer ID: " + analyzer.getAnalyzerId());
                    System.out.println("Base Analyzer ID: " + baseId);
                    System.out.println("Description: " + description);

                    // Display configuration
                    if (analyzer.getConfig() != null) {
                        System.out.println("\nAnalyzer Configuration:");
                        System.out.println("  Enable OCR: " + analyzer.getConfig().isEnableOcr());
                        System.out.println("  Enable Layout: " + analyzer.getConfig().isEnableLayout());
                        System.out.println("  Enable Formula: " + analyzer.getConfig().isEnableFormula());
                        System.out.println("  Estimate Field Source and Confidence: "
                            + analyzer.getConfig().isEstimateFieldSourceAndConfidence());
                        System.out.println("  Return Details: " + analyzer.getConfig().isReturnDetails());
                    }

                    // Display field schema if available
                    if (analyzer.getFieldSchema() != null) {
                        System.out.println("\nField Schema:");
                        System.out.println("  Name: " + analyzer.getFieldSchema().getName());
                        String schemaDescription = analyzer.getFieldSchema().getDescription() != null
                            ? analyzer.getFieldSchema().getDescription()
                            : "N/A";
                        System.out.println("  Description: " + schemaDescription);
                        if (analyzer.getFieldSchema().getFields() != null) {
                            System.out.println("  Number of fields: " + analyzer.getFieldSchema().getFields().size());
                            System.out.println("  Fields:");
                            analyzer.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> {
                                Object method = fieldDef.getMethod() != null ? fieldDef.getMethod() : "N/A";
                                System.out.println("    - " + fieldName + " (" + fieldDef.getType()
                                    + ", Method: " + method + ")");
                                if (fieldDef.getDescription() != null && !fieldDef.getDescription().trim().isEmpty()) {
                                    System.out.println("      Description: " + fieldDef.getDescription());
                                }
                            });
                        }
                    }

                    // Display models if available
                    if (analyzer.getModels() != null && !analyzer.getModels().isEmpty()) {
                        System.out.println("\nModel Mappings:");
                        analyzer.getModels().forEach((modelKey, modelValue) -> {
                            System.out.println("  " + modelKey + ": " + modelValue);
                        });
                    }

                    // Display status if available
                    if (analyzer.getStatus() != null) {
                        System.out.println("\nAnalyzer Status: " + analyzer.getStatus());
                    }

                    // Display created/updated timestamps if available
                    if (analyzer.getCreatedAt() != null) {
                        System.out.println("Created: " + analyzer.getCreatedAt());
                    }
                    if (analyzer.getLastModifiedAt() != null) {
                        System.out.println("Updated: " + analyzer.getLastModifiedAt());
                    }
                },
                error -> {
                    System.err.println("Error occurred: " + error.getMessage());
                    error.printStackTrace();
                    System.exit(1);
                }
            );
        // END:ContentUnderstandingGetAnalyzerAsync

        // The .subscribe() call is not blocking. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentFieldDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldSchema;
import com.azure.ai.contentunderstanding.models.ContentFieldType;
import com.azure.ai.contentunderstanding.models.GenerationMethod;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.util.HashMap;
import java.util.Map;

/**
 * Sample demonstrating how to update an existing analyzer.
 * This sample shows:
 * 1. Creating an analyzer
 * 2. Updating analyzer description
 * 3. Updating analyzer configuration
 * 4. Updating field schema
 */
public class Sample08_UpdateAnalyzer {

    // ID of the analyzer created for this demonstration; used again for cleanup.
    private static String analyzerId;

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample08.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample08.buildClient

        // Create an analyzer for testing.
        // NOTE(review): the pasted source had lost its generic type parameters (raw Map);
        // the parameterized types are restored here.
        analyzerId = "update_test_analyzer_" + System.currentTimeMillis();
        System.out.println("Creating test analyzer '" + analyzerId + "'...");

        Map<String, ContentFieldDefinition> fields = new HashMap<>();
        ContentFieldDefinition titleDef = new ContentFieldDefinition();
        titleDef.setType(ContentFieldType.STRING);
        titleDef.setMethod(GenerationMethod.EXTRACT);
        titleDef.setDescription("Document title");
        fields.put("title", titleDef);

        ContentFieldSchema fieldSchema = new ContentFieldSchema();
        fieldSchema.setName("basic_schema");
        fieldSchema.setDescription("Basic document schema");
        fieldSchema.setFields(fields);

        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");
        models.put("embedding", "text-embedding-3-large");

        ContentAnalyzer analyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Original analyzer for update testing")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableOcr(true)
                .setEnableLayout(true))
            .setFieldSchema(fieldSchema)
            .setModels(models);

        client.beginCreateAnalyzer(analyzerId, analyzer, true).getFinalResult();
        System.out.println("Test analyzer created: " + analyzerId);

        // BEGIN:ContentUnderstandingUpdateAnalyzer
        // Get the current analyzer
        ContentAnalyzer currentAnalyzer = client.getAnalyzer(analyzerId);
        System.out.println("\nCurrent description: " + currentAnalyzer.getDescription());

        // Update the analyzer with new configuration
        Map<String, ContentFieldDefinition> updatedFields = new HashMap<>();

        // Keep the original field
        ContentFieldDefinition titleDefUpdate = new ContentFieldDefinition();
        titleDefUpdate.setType(ContentFieldType.STRING);
        titleDefUpdate.setMethod(GenerationMethod.EXTRACT);
        titleDefUpdate.setDescription("Document title");
        updatedFields.put("title", titleDefUpdate);

        // Add a new field
        ContentFieldDefinition authorDef = new ContentFieldDefinition();
        authorDef.setType(ContentFieldType.STRING);
        authorDef.setMethod(GenerationMethod.EXTRACT);
        authorDef.setDescription("Document author");
        updatedFields.put("author", authorDef);

        ContentFieldSchema updatedFieldSchema = new ContentFieldSchema();
        updatedFieldSchema.setName("enhanced_schema");
        updatedFieldSchema.setDescription("Enhanced document schema with author");
        updatedFieldSchema.setFields(updatedFields);

        Map<String, String> updatedModels = new HashMap<>();
        updatedModels.put("completion", "gpt-4.1");
        updatedModels.put("embedding", "text-embedding-3-large");

        ContentAnalyzer updatedAnalyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Updated analyzer with enhanced schema")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableOcr(true)
                .setEnableLayout(true)
                .setEnableFormula(true)) // Enable formula extraction
            .setFieldSchema(updatedFieldSchema)
            .setModels(updatedModels);

        // Update the analyzer using the convenience method
        // This method accepts a ContentAnalyzer object directly instead of BinaryData
        ContentAnalyzer result = client.updateAnalyzer(analyzerId, updatedAnalyzer);

        System.out.println("Analyzer updated successfully!");
        System.out.println("New description: " + result.getDescription());
        if (result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) {
            System.out.println("Field schema now has " + result.getFieldSchema().getFields().size() + " fields");
        }
        // END:ContentUnderstandingUpdateAnalyzer

        // Cleanup
        System.out.println("\nCleaning up: deleting test analyzer '" + analyzerId + "'...");
        client.deleteAnalyzer(analyzerId);
        System.out.println("Test analyzer deleted successfully.");
    }
}
package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentFieldDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldSchema;
import com.azure.ai.contentunderstanding.models.ContentFieldType;
import com.azure.ai.contentunderstanding.models.GenerationMethod;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrating how to update an existing analyzer asynchronously.
 * This sample shows:
 * 1. Creating an analyzer
 * 2. Updating analyzer description
 * 3. Updating analyzer configuration
 * 4. Updating field schema
 */
public class Sample08_UpdateAnalyzerAsync {

    private static String analyzerId;

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample08Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample08Async.buildClient

        // Create an analyzer for testing
        analyzerId = "update_test_analyzer_" + System.currentTimeMillis();
        System.out.println("Creating test analyzer '" + analyzerId + "'...");

        // Field schema with a single extracted "title" field.
        // NOTE(review): generic type parameters on declarations below were restored;
        // they appear stripped in the rendered source (e.g. "Map fields").
        Map<String, ContentFieldDefinition> fields = new HashMap<>();
        ContentFieldDefinition titleDef = new ContentFieldDefinition();
        titleDef.setType(ContentFieldType.STRING);
        titleDef.setMethod(GenerationMethod.EXTRACT);
        titleDef.setDescription("Document title");
        fields.put("title", titleDef);

        ContentFieldSchema fieldSchema = new ContentFieldSchema();
        fieldSchema.setName("basic_schema");
        fieldSchema.setDescription("Basic document schema");
        fieldSchema.setFields(fields);

        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");
        models.put("embedding", "text-embedding-3-large");

        ContentAnalyzer analyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Original analyzer for update testing")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableOcr(true)
                .setEnableLayout(true))
            .setFieldSchema(fieldSchema)
            .setModels(models);

        // NOTE(review): the PollerFlux type parameters were also stripped by rendering;
        // confirm the exact <poll, result> types against ContentUnderstandingAsyncClient.
        PollerFlux createPoller = client.beginCreateAnalyzer(analyzerId, analyzer, true);

        String finalAnalyzerId = analyzerId; // effectively-final copy for use in lambdas
        createPoller.last()
            .flatMap(pollResponse -> {
                // Only fetch the final result once the long-running creation has succeeded.
                if (pollResponse.getStatus().isComplete()) {
                    System.out.println("Polling completed successfully");
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(result -> {
                System.out.println("Test analyzer created: " + finalAnalyzerId);
            })
            .then(client.getAnalyzer(finalAnalyzerId))
            .doOnNext(currentAnalyzer -> {
                // BEGIN:ContentUnderstandingUpdateAnalyzerAsync
                System.out.println("\nCurrent description: " + currentAnalyzer.getDescription());
            })
            .flatMap(currentAnalyzer -> {
                // Update the analyzer with new configuration
                Map<String, ContentFieldDefinition> updatedFields = new HashMap<>();

                // Keep the original field
                ContentFieldDefinition titleDefUpdate = new ContentFieldDefinition();
                titleDefUpdate.setType(ContentFieldType.STRING);
                titleDefUpdate.setMethod(GenerationMethod.EXTRACT);
                titleDefUpdate.setDescription("Document title");
                updatedFields.put("title", titleDefUpdate);

                // Add a new field
                ContentFieldDefinition authorDef = new ContentFieldDefinition();
                authorDef.setType(ContentFieldType.STRING);
                authorDef.setMethod(GenerationMethod.EXTRACT);
                authorDef.setDescription("Document author");
                updatedFields.put("author", authorDef);

                ContentFieldSchema updatedFieldSchema = new ContentFieldSchema();
                updatedFieldSchema.setName("enhanced_schema");
                updatedFieldSchema.setDescription("Enhanced document schema with author");
                updatedFieldSchema.setFields(updatedFields);

                Map<String, String> updatedModels = new HashMap<>();
                updatedModels.put("completion", "gpt-4.1");
                updatedModels.put("embedding", "text-embedding-3-large");

                ContentAnalyzer updatedAnalyzer = new ContentAnalyzer()
                    .setBaseAnalyzerId("prebuilt-document")
                    .setDescription("Updated analyzer with enhanced schema")
                    .setConfig(new ContentAnalyzerConfig()
                        .setEnableOcr(true)
                        .setEnableLayout(true)
                        .setEnableFormula(true)) // Enable formula extraction
                    .setFieldSchema(updatedFieldSchema)
                    .setModels(updatedModels);

                // Update the analyzer using the convenience method
                // This method accepts a ContentAnalyzer object directly instead of BinaryData
                return client.updateAnalyzer(finalAnalyzerId, updatedAnalyzer);
            })
            .doOnNext(result -> {
                System.out.println("Analyzer updated successfully!");
                System.out.println("New description: " + result.getDescription());
                if (result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) {
                    System.out.println("Field schema now has " + result.getFieldSchema().getFields().size() + " fields");
                }
                // END:ContentUnderstandingUpdateAnalyzerAsync
            })
            .then(Mono.fromRunnable(() -> {
                // Cleanup
                System.out.println("\nCleaning up: deleting test analyzer '" + finalAnalyzerId + "'...");
            }))
            .then(client.deleteAnalyzer(finalAnalyzerId))
            .doOnSuccess(v -> {
                System.out.println("Test analyzer deleted successfully.");
            })
            .doOnError(error -> {
                System.err.println("Error occurred: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );

        // The .subscribe() creation is not a blocking call. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.SECONDS.sleep(30);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
result.getFieldSchema().getFields().size() + " fields"); + } + // END:ContentUnderstandingUpdateAnalyzerAsync + }) + .then(Mono.fromRunnable(() -> { + // Cleanup + System.out.println("\nCleaning up: deleting test analyzer '" + finalAnalyzerId + "'..."); + })) + .then(client.deleteAnalyzer(finalAnalyzerId)) + .doOnSuccess(v -> { + System.out.println("Test analyzer deleted successfully."); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(30); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzer.java new file mode 100644 index 000000000000..e2b6f57b4e3e --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzer.java @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentFieldDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldSchema;
import com.azure.ai.contentunderstanding.models.ContentFieldType;
import com.azure.ai.contentunderstanding.models.GenerationMethod;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.exception.ResourceNotFoundException;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.util.HashMap;
import java.util.Map;

/**
 * Sample demonstrating how to delete an analyzer.
 * This sample shows:
 * 1. Creating a temporary analyzer
 * 2. Verifying the analyzer exists
 * 3. Deleting the analyzer
 * 4. Verifying the analyzer no longer exists
 */
public class Sample09_DeleteAnalyzer {

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample09.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample09.buildClient

        // BEGIN:ContentUnderstandingDeleteAnalyzer
        // First, create a temporary analyzer to delete
        String analyzerId = "analyzer_to_delete_" + System.currentTimeMillis();
        System.out.println("Creating temporary analyzer '" + analyzerId + "'...");

        // NOTE(review): generic type parameters on declarations below were restored;
        // they appear stripped in the rendered source (e.g. "Map fields").
        Map<String, ContentFieldDefinition> fields = new HashMap<>();
        ContentFieldDefinition titleDef = new ContentFieldDefinition();
        titleDef.setType(ContentFieldType.STRING);
        titleDef.setMethod(GenerationMethod.EXTRACT);
        titleDef.setDescription("Document title");
        fields.put("title", titleDef);

        ContentFieldSchema fieldSchema = new ContentFieldSchema();
        fieldSchema.setName("temp_schema");
        fieldSchema.setDescription("Temporary schema for deletion demo");
        fieldSchema.setFields(fields);

        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");
        models.put("embedding", "text-embedding-3-large");

        ContentAnalyzer analyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Temporary analyzer for deletion demo")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableOcr(true)
                .setEnableLayout(true))
            .setFieldSchema(fieldSchema)
            .setModels(models);

        client.beginCreateAnalyzer(analyzerId, analyzer, true).getFinalResult();
        System.out.println("Temporary analyzer created: " + analyzerId);

        // Verify the analyzer exists
        ContentAnalyzer retrievedAnalyzer = client.getAnalyzer(analyzerId);
        System.out.println("Verified analyzer exists with ID: " + retrievedAnalyzer.getAnalyzerId());

        // Delete the analyzer
        client.deleteAnalyzer(analyzerId);
        System.out.println("Analyzer deleted successfully: " + analyzerId);

        // Verify the analyzer no longer exists; getAnalyzer is expected to throw
        // ResourceNotFoundException once the deletion has taken effect.
        boolean analyzerDeleted = false;
        try {
            client.getAnalyzer(analyzerId);
        } catch (ResourceNotFoundException e) {
            analyzerDeleted = true;
            System.out.println("Confirmed: Analyzer no longer exists");
        }
        // FIX: the flag was previously assigned but never read, so the sample stayed
        // silent when the analyzer unexpectedly survived deletion. Surface that case.
        if (!analyzerDeleted) {
            System.out.println("Warning: Analyzer still exists after deletion");
        }
        // END:ContentUnderstandingDeleteAnalyzer
    }
}
System.out.println("Confirmed: Analyzer no longer exists"); + } + // END:ContentUnderstandingDeleteAnalyzer + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzerAsync.java new file mode 100644 index 000000000000..a78cef84922d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample09_DeleteAnalyzerAsync.java @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.exception.ResourceNotFoundException; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrating how to delete an analyzer asynchronously. + * This sample shows: + * 1. Creating a temporary analyzer + * 2. Verifying the analyzer exists + * 3. 
package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.ContentAnalyzer;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig;
import com.azure.ai.contentunderstanding.models.ContentFieldDefinition;
import com.azure.ai.contentunderstanding.models.ContentFieldSchema;
import com.azure.ai.contentunderstanding.models.ContentFieldType;
import com.azure.ai.contentunderstanding.models.GenerationMethod;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.exception.ResourceNotFoundException;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrating how to delete an analyzer asynchronously.
 * This sample shows:
 * 1. Creating a temporary analyzer
 * 2. Verifying the analyzer exists
 * 3. Deleting the analyzer
 * 4. Verifying the analyzer no longer exists
 */
public class Sample09_DeleteAnalyzerAsync {

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample09Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample09Async.buildClient

        // BEGIN:ContentUnderstandingDeleteAnalyzerAsync
        // First, create a temporary analyzer to delete
        String analyzerId = "analyzer_to_delete_" + System.currentTimeMillis();
        System.out.println("Creating temporary analyzer '" + analyzerId + "'...");

        // NOTE(review): generic type parameters on declarations below were restored;
        // they appear stripped in the rendered source (e.g. "Map fields").
        Map<String, ContentFieldDefinition> fields = new HashMap<>();
        ContentFieldDefinition titleDef = new ContentFieldDefinition();
        titleDef.setType(ContentFieldType.STRING);
        titleDef.setMethod(GenerationMethod.EXTRACT);
        titleDef.setDescription("Document title");
        fields.put("title", titleDef);

        ContentFieldSchema fieldSchema = new ContentFieldSchema();
        fieldSchema.setName("temp_schema");
        fieldSchema.setDescription("Temporary schema for deletion demo");
        fieldSchema.setFields(fields);

        Map<String, String> models = new HashMap<>();
        models.put("completion", "gpt-4.1");
        models.put("embedding", "text-embedding-3-large");

        ContentAnalyzer analyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Temporary analyzer for deletion demo")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableOcr(true)
                .setEnableLayout(true))
            .setFieldSchema(fieldSchema)
            .setModels(models);

        // NOTE(review): PollerFlux type parameters were stripped by rendering; confirm
        // the exact <poll, result> types against ContentUnderstandingAsyncClient.
        PollerFlux createPoller = client.beginCreateAnalyzer(analyzerId, analyzer, true);

        String finalAnalyzerId = analyzerId; // effectively-final copy for use in lambdas
        createPoller.last()
            .flatMap(pollResponse -> {
                // Only fetch the final result once the long-running creation has succeeded.
                if (pollResponse.getStatus().isComplete()) {
                    System.out.println("Polling completed successfully");
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(result -> {
                System.out.println("Temporary analyzer created: " + finalAnalyzerId);
            })
            .then(client.getAnalyzer(finalAnalyzerId))
            .doOnNext(retrievedAnalyzer -> {
                System.out.println("Verified analyzer exists with ID: " + retrievedAnalyzer.getAnalyzerId());
            })
            .then(client.deleteAnalyzer(finalAnalyzerId))
            .doOnSuccess(v -> {
                System.out.println("Analyzer deleted successfully: " + finalAnalyzerId);
            })
            .then(client.getAnalyzer(finalAnalyzerId))
            .doOnNext(ignored -> {
                // Should not reach here if analyzer was deleted
                System.out.println("Warning: Analyzer still exists after deletion");
            })
            // The expected ResourceNotFoundException is consumed here, so the
            // instanceof guards below are purely defensive for other error types.
            .onErrorResume(ResourceNotFoundException.class, e -> {
                System.out.println("Confirmed: Analyzer no longer exists");
                return Mono.empty();
            })
            .doOnError(error -> {
                if (!(error instanceof ResourceNotFoundException)) {
                    System.err.println("Error occurred: " + error.getMessage());
                    error.printStackTrace();
                }
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    if (!(error instanceof ResourceNotFoundException)) {
                        // Error already handled in doOnError
                        System.exit(1);
                    }
                }
            );
        // END:ContentUnderstandingDeleteAnalyzerAsync

        // The .subscribe() creation is not a blocking call. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.SECONDS.sleep(30);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(30); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigs.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigs.java new file mode 100644 index 000000000000..ef3afee0cdd1 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigs.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentAnnotation; +import com.azure.ai.contentunderstanding.models.DocumentChartFigure; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentFormula; +import com.azure.ai.contentunderstanding.models.DocumentHyperlink; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import 
package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.DocumentAnnotation;
import com.azure.ai.contentunderstanding.models.DocumentChartFigure;
import com.azure.ai.contentunderstanding.models.DocumentContent;
import com.azure.ai.contentunderstanding.models.DocumentFormula;
import com.azure.ai.contentunderstanding.models.DocumentHyperlink;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.SyncPoller;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Sample demonstrating how to analyze documents with advanced configs using prebuilt-documentSearch.
 * This sample shows:
 * 1. Using prebuilt-documentSearch analyzer which has formulas, layout, and OCR enabled
 * 2. Extracting charts from documents
 * 3. Extracting hyperlinks from documents
 * 4. Extracting formulas from document pages
 * 5. Extracting annotations from documents
 */
public class Sample10_AnalyzeConfigs {

    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample10.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample10.buildClient

        // BEGIN:ContentUnderstandingAnalyzeWithConfigs
        // Load local sample file
        Path filePath = Paths.get("src/samples/resources/sample_document_features.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);
        BinaryData binaryData = BinaryData.fromBytes(fileBytes);

        System.out.println("Analyzing " + filePath + " with prebuilt-documentSearch...");
        System.out.println("Note: prebuilt-documentSearch has formulas, layout, and OCR enabled by default.");

        // Analyze with prebuilt-documentSearch which has formulas, layout, and OCR enabled
        // These configs enable extraction of charts, annotations, hyperlinks, and formulas
        // FIX: restored the SyncPoller type parameters that were stripped in rendering;
        // the poll type is grounded by this file's ContentAnalyzerAnalyzeOperationStatus import.
        SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation
            = client.beginAnalyzeBinary("prebuilt-documentSearch", binaryData);

        AnalyzeResult result = operation.getFinalResult();
        // END:ContentUnderstandingAnalyzeWithConfigs

        // Guard against an empty result before indexing into contents (robustness fix).
        boolean hasContent = result.getContents() != null && !result.getContents().isEmpty();

        // BEGIN:ContentUnderstandingExtractCharts
        // Extract charts from document content (enabled by EnableFigureAnalysis config)
        if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
            DocumentContent documentContent = (DocumentContent) result.getContents().get(0);

            if (documentContent.getFigures() != null && !documentContent.getFigures().isEmpty()) {
                List<DocumentChartFigure> chartFigures = documentContent.getFigures()
                    .stream()
                    .filter(f -> f instanceof DocumentChartFigure)
                    .map(f -> (DocumentChartFigure) f)
                    .collect(Collectors.toList());

                for (DocumentChartFigure chart : chartFigures) {
                    System.out.println("  Chart ID: " + chart.getId());
                    System.out.println("  Description: "
                        + (chart.getDescription() != null ? chart.getDescription() : "(not available)"));
                    System.out.println("  Caption: "
                        + (chart.getCaption() != null && chart.getCaption().getContent() != null
                            ? chart.getCaption().getContent() : "(not available)"));
                }
            }
        }
        // END:ContentUnderstandingExtractCharts

        // BEGIN:ContentUnderstandingExtractHyperlinks
        // Extract hyperlinks from document content (enabled by EnableLayout config)
        if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
            DocumentContent docContent = (DocumentContent) result.getContents().get(0);

            System.out.println("\nFound "
                + (docContent.getHyperlinks() != null ? docContent.getHyperlinks().size() : 0) + " hyperlink(s)");
            if (docContent.getHyperlinks() != null) {
                for (DocumentHyperlink hyperlink : docContent.getHyperlinks()) {
                    System.out.println("  URL: "
                        + (hyperlink.getUrl() != null ? hyperlink.getUrl() : "(not available)"));
                    System.out.println("  Content: "
                        + (hyperlink.getContent() != null ? hyperlink.getContent() : "(not available)"));
                }
            }
        }
        // END:ContentUnderstandingExtractHyperlinks

        // BEGIN:ContentUnderstandingExtractFormulas
        // Extract formulas from document pages (enabled by EnableFormula config)
        if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
            DocumentContent content = (DocumentContent) result.getContents().get(0);

            int formulaCount = 0;
            if (content.getPages() != null) {
                for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) {
                    if (page.getFormulas() != null) {
                        formulaCount += page.getFormulas().size();
                    }
                }
            }

            System.out.println("\nFound " + formulaCount + " formula(s)");
            if (formulaCount > 0 && content.getPages() != null) {
                for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) {
                    if (page.getFormulas() != null) {
                        for (DocumentFormula formula : page.getFormulas()) {
                            System.out.println("  Formula Kind: " + formula.getKind());
                            System.out.println("  LaTeX: "
                                + (formula.getValue() != null ? formula.getValue() : "(not available)"));
                            System.out.println("  Confidence: "
                                + (formula.getConfidence() != null
                                    ? String.format("%.2f", formula.getConfidence()) : "N/A"));
                        }
                    }
                }
            }
        }
        // END:ContentUnderstandingExtractFormulas

        // BEGIN:ContentUnderstandingExtractAnnotations
        // Extract annotations from document content (enabled by EnableLayout config)
        if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
            DocumentContent document = (DocumentContent) result.getContents().get(0);

            System.out.println("\nFound "
                + (document.getAnnotations() != null ? document.getAnnotations().size() : 0) + " annotation(s)");
            if (document.getAnnotations() != null) {
                for (DocumentAnnotation annotation : document.getAnnotations()) {
                    System.out.println("  Annotation ID: " + annotation.getId());
                    System.out.println("  Kind: " + annotation.getKind());
                    System.out.println("  Author: "
                        + (annotation.getAuthor() != null ? annotation.getAuthor() : "(not available)"));
                    System.out.println("  Comments: "
                        + (annotation.getComments() != null ? annotation.getComments().size() : 0));
                    if (annotation.getComments() != null) {
                        for (com.azure.ai.contentunderstanding.models.DocumentAnnotationComment comment : annotation.getComments()) {
                            System.out.println("    - " + comment.getMessage());
                        }
                    }
                }
            }
        }
        // END:ContentUnderstandingExtractAnnotations
    }
}
String.format("%.2f", formula.getConfidence()) : "N/A")); + } + } + } + } + } + // END:ContentUnderstandingExtractFormulas + + // BEGIN:ContentUnderstandingExtractAnnotations + // Extract annotations from document content (enabled by EnableLayout config) + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent document = (DocumentContent) result.getContents().get(0); + + System.out.println("\nFound " + + (document.getAnnotations() != null ? document.getAnnotations().size() : 0) + " annotation(s)"); + if (document.getAnnotations() != null) { + for (DocumentAnnotation annotation : document.getAnnotations()) { + System.out.println(" Annotation ID: " + annotation.getId()); + System.out.println(" Kind: " + annotation.getKind()); + System.out.println(" Author: " + + (annotation.getAuthor() != null ? annotation.getAuthor() : "(not available)")); + System.out.println(" Comments: " + + (annotation.getComments() != null ? annotation.getComments().size() : 0)); + if (annotation.getComments() != null) { + for (com.azure.ai.contentunderstanding.models.DocumentAnnotationComment comment : annotation.getComments()) { + System.out.println(" - " + comment.getMessage()); + } + } + } + } + } + // END:ContentUnderstandingExtractAnnotations + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigsAsync.java new file mode 100644 index 000000000000..ecb9064c4a47 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample10_AnalyzeConfigsAsync.java @@ -0,0 +1,208 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.DocumentAnnotation;
import com.azure.ai.contentunderstanding.models.DocumentChartFigure;
import com.azure.ai.contentunderstanding.models.DocumentContent;
import com.azure.ai.contentunderstanding.models.DocumentFormula;
import com.azure.ai.contentunderstanding.models.DocumentHyperlink;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * Sample demonstrating how to analyze documents with advanced configs using prebuilt-documentSearch asynchronously.
 * This sample shows:
 * 1. Using prebuilt-documentSearch analyzer which has formulas, layout, and OCR enabled
 * 2. Extracting charts from documents
 * 3. Extracting hyperlinks from documents
 * 4. Extracting formulas from document pages
 * 5. Extracting annotations from documents
 */
public class Sample10_AnalyzeConfigsAsync {

    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample10Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample10Async.buildClient

        // BEGIN:ContentUnderstandingAnalyzeWithConfigsAsync
        // Load local sample file
        Path filePath = Paths.get("src/samples/resources/sample_document_features.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);
        BinaryData binaryData = BinaryData.fromBytes(fileBytes);

        System.out.println("Analyzing " + filePath + " with prebuilt-documentSearch...");
        System.out.println("Note: prebuilt-documentSearch has formulas, layout, and OCR enabled by default.");

        // Analyze with prebuilt-documentSearch which has formulas, layout, and OCR enabled
        // These configs enable extraction of charts, annotations, hyperlinks, and formulas
        // FIX: restored the PollerFlux type parameters that were stripped in rendering;
        // the poll type is grounded by this file's ContentAnalyzerAnalyzeOperationStatus import.
        PollerFlux<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation
            = client.beginAnalyzeBinary("prebuilt-documentSearch", binaryData);

        operation.last()
            .flatMap(pollResponse -> {
                // Only fetch the final result once the long-running analysis has succeeded.
                if (pollResponse.getStatus().isComplete()) {
                    System.out.println("Polling completed successfully");
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(result -> {
                // Guard against an empty result before indexing into contents (robustness fix).
                boolean hasContent = result.getContents() != null && !result.getContents().isEmpty();

                // BEGIN:ContentUnderstandingExtractChartsAsync
                // Extract charts from document content (enabled by EnableFigureAnalysis config)
                if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
                    DocumentContent documentContent = (DocumentContent) result.getContents().get(0);

                    if (documentContent.getFigures() != null && !documentContent.getFigures().isEmpty()) {
                        List<DocumentChartFigure> chartFigures = documentContent.getFigures()
                            .stream()
                            .filter(f -> f instanceof DocumentChartFigure)
                            .map(f -> (DocumentChartFigure) f)
                            .collect(Collectors.toList());

                        for (DocumentChartFigure chart : chartFigures) {
                            System.out.println("  Chart ID: " + chart.getId());
                            System.out.println("  Description: "
                                + (chart.getDescription() != null ? chart.getDescription() : "(not available)"));
                            System.out.println("  Caption: "
                                + (chart.getCaption() != null && chart.getCaption().getContent() != null
                                    ? chart.getCaption().getContent() : "(not available)"));
                        }
                    }
                }
                // END:ContentUnderstandingExtractChartsAsync

                // BEGIN:ContentUnderstandingExtractHyperlinksAsync
                // Extract hyperlinks from document content (enabled by EnableLayout config)
                if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
                    DocumentContent docContent = (DocumentContent) result.getContents().get(0);

                    System.out.println("\nFound "
                        + (docContent.getHyperlinks() != null ? docContent.getHyperlinks().size() : 0) + " hyperlink(s)");
                    if (docContent.getHyperlinks() != null) {
                        for (DocumentHyperlink hyperlink : docContent.getHyperlinks()) {
                            System.out.println("  URL: "
                                + (hyperlink.getUrl() != null ? hyperlink.getUrl() : "(not available)"));
                            System.out.println("  Content: "
                                + (hyperlink.getContent() != null ? hyperlink.getContent() : "(not available)"));
                        }
                    }
                }
                // END:ContentUnderstandingExtractHyperlinksAsync

                // BEGIN:ContentUnderstandingExtractFormulasAsync
                // Extract formulas from document pages (enabled by EnableFormula config)
                if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
                    DocumentContent content = (DocumentContent) result.getContents().get(0);

                    int formulaCount = 0;
                    if (content.getPages() != null) {
                        for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) {
                            if (page.getFormulas() != null) {
                                formulaCount += page.getFormulas().size();
                            }
                        }
                    }

                    System.out.println("\nFound " + formulaCount + " formula(s)");
                    if (formulaCount > 0 && content.getPages() != null) {
                        for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) {
                            if (page.getFormulas() != null) {
                                for (DocumentFormula formula : page.getFormulas()) {
                                    System.out.println("  Formula Kind: " + formula.getKind());
                                    System.out.println("  LaTeX: "
                                        + (formula.getValue() != null ? formula.getValue() : "(not available)"));
                                    System.out.println("  Confidence: "
                                        + (formula.getConfidence() != null
                                            ? String.format("%.2f", formula.getConfidence()) : "N/A"));
                                }
                            }
                        }
                    }
                }
                // END:ContentUnderstandingExtractFormulasAsync

                // BEGIN:ContentUnderstandingExtractAnnotationsAsync
                // Extract annotations from document content (enabled by EnableLayout config)
                if (hasContent && result.getContents().get(0) instanceof DocumentContent) {
                    DocumentContent document = (DocumentContent) result.getContents().get(0);

                    System.out.println("\nFound "
                        + (document.getAnnotations() != null ? document.getAnnotations().size() : 0) + " annotation(s)");
                    if (document.getAnnotations() != null) {
                        for (DocumentAnnotation annotation : document.getAnnotations()) {
                            System.out.println("  Annotation ID: " + annotation.getId());
                            System.out.println("  Kind: " + annotation.getKind());
                            System.out.println("  Author: "
                                + (annotation.getAuthor() != null ? annotation.getAuthor() : "(not available)"));
                            System.out.println("  Comments: "
                                + (annotation.getComments() != null ? annotation.getComments().size() : 0));
                            if (annotation.getComments() != null) {
                                for (com.azure.ai.contentunderstanding.models.DocumentAnnotationComment comment : annotation
                                    .getComments()) {
                                    System.out.println("    - " + comment.getMessage());
                                }
                            }
                        }
                    }
                }
                // END:ContentUnderstandingExtractAnnotationsAsync
            })
            .doOnError(error -> {
                System.err.println("Error occurred: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );
        // END:ContentUnderstandingAnalyzeWithConfigsAsync

        // The .subscribe() creation is not a blocking call. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.MINUTES.sleep(1);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.http.rest.RequestOptions;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.PollResponse;
import com.azure.core.util.polling.SyncPoller;
import com.azure.identity.DefaultAzureCredentialBuilder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Base64;

/**
 * Sample demonstrating how to analyze documents and get raw JSON response using protocol methods.
 * This sample shows:
 * 1. Using protocol method to get raw JSON response instead of strongly-typed objects
 * 2. Parsing raw JSON response
 * 3. Pretty-printing and saving JSON to file
 *
 * Note: For production use, prefer the object model approach (beginAnalyzeBinary with typed parameters)
 * which returns AnalyzeResult objects that are easier to work with.
 */
public class Sample11_AnalyzeReturnRawJson {

    /**
     * Entry point: analyzes a local PDF with the "prebuilt-documentSearch" analyzer via the
     * protocol (untyped) overload, then validates, pretty-prints and saves the raw JSON result.
     *
     * @param args unused.
     * @throws IOException if the sample file cannot be read or the output file cannot be written.
     */
    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample11.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample11.buildClient

        System.out.println("Client initialized successfully");

        // BEGIN:ContentUnderstandingAnalyzeReturnRawJson
        // Load local sample file
        Path filePath = Paths.get("src/samples/resources/sample_invoice.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);

        // Prepare request body with binary data using JSON format
        // Note: The API expects a JSON request with "inputs" array containing document data
        String base64Data = Base64.getEncoder().encodeToString(fileBytes);
        String requestJson = String.format("{\"inputs\": [{\"data\": \"%s\"}]}", base64Data);
        BinaryData requestBody = BinaryData.fromString(requestJson);

        // Use protocol method to get raw JSON response
        // Note: For production use, prefer the object model approach (beginAnalyze with typed parameters)
        // which returns AnalyzeResult objects that are easier to work with
        SyncPoller<BinaryData, BinaryData> operation
            = client.beginAnalyze("prebuilt-documentSearch", requestBody, new RequestOptions());

        // Capture the terminal poll response before fetching the final result so the status
        // printed below is the recorded terminal status, rather than issuing another poll()
        // call after the operation has already completed.
        PollResponse<BinaryData> completion = operation.waitForCompletion();

        BinaryData responseData = operation.getFinalResult();
        // END:ContentUnderstandingAnalyzeReturnRawJson

        System.out.println("File loaded: " + filePath + " (" + String.format("%,d", fileBytes.length) + " bytes)");
        System.out.println("Analysis operation completed with status: " + completion.getStatus());

        // Materialize the payload once; BinaryData may otherwise re-read its source on each call.
        byte[] responseBytes = responseData.toBytes();
        System.out.println("Response data size: " + String.format("%,d", responseBytes.length) + " bytes");

        // Verify response data can be converted to string
        String responseString = responseData.toString();
        System.out.println("Response string length: " + String.format("%,d", responseString.length()) + " characters");

        // Verify response is valid JSON format
        try {
            ObjectMapper mapper = new ObjectMapper();
            mapper.readTree(responseBytes);
            System.out.println("Response is valid JSON format");
        } catch (Exception ex) {
            System.err.println("Response data is not valid JSON: " + ex.getMessage());
        }

        System.out.println("Raw JSON analysis operation completed successfully");

        // BEGIN:ContentUnderstandingParseRawJson
        // Parse the raw JSON response
        // (A new ObjectMapper is created here deliberately so this docs snippet stays self-contained.)
        ObjectMapper mapper = new ObjectMapper();
        JsonNode jsonNode = mapper.readTree(responseData.toBytes());

        // Pretty-print the JSON
        String prettyJson = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);

        // Create output directory if it doesn't exist
        Path outputDir = Paths.get("target/sample_output");
        Files.createDirectories(outputDir);

        // Save to file
        String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"));
        String outputFileName = "analyze_result_" + timestamp + ".json";
        Path outputPath = outputDir.resolve(outputFileName);
        Files.write(outputPath, prettyJson.getBytes(StandardCharsets.UTF_8));

        System.out.println("Raw JSON response saved to: " + outputPath);
        System.out.println("File size: " + String.format("%,d", prettyJson.length()) + " characters");
        // END:ContentUnderstandingParseRawJson

        System.out.println("\nRaw JSON result saved to: " + outputPath);
        long fileSize = Files.size(outputPath);
        System.out.println("File size: " + String.format("%,d", fileSize) + " bytes");
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.http.rest.RequestOptions;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import reactor.core.publisher.Mono;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Base64;
import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrating how to analyze documents and get raw JSON response using protocol methods asynchronously.
 * This sample shows:
 * 1. Using protocol method to get raw JSON response instead of strongly-typed objects
 * 2. Parsing raw JSON response
 * 3. Pretty-printing and saving JSON to file
 *
 * Note: For production use, prefer the object model approach (beginAnalyzeBinary with typed parameters)
 * which returns AnalyzeResult objects that are easier to work with.
 */
public class Sample11_AnalyzeReturnRawJsonAsync {

    /**
     * Entry point: analyzes a local PDF with the "prebuilt-documentSearch" analyzer through the
     * async protocol overload, then validates, pretty-prints and saves the raw JSON result.
     *
     * @param args unused.
     * @throws IOException if the sample file cannot be read.
     */
    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample11Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample11Async.buildClient

        System.out.println("Client initialized successfully");

        // BEGIN:ContentUnderstandingAnalyzeReturnRawJsonAsync
        // Load local sample file
        Path filePath = Paths.get("src/samples/resources/sample_invoice.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);

        // Prepare request body with binary data using JSON format
        // Note: The API expects a JSON request with "inputs" array containing document data
        String base64Data = Base64.getEncoder().encodeToString(fileBytes);
        String requestJson = String.format("{\"inputs\": [{\"data\": \"%s\"}]}", base64Data);
        BinaryData requestBody = BinaryData.fromString(requestJson);

        // Use protocol method to get raw JSON response
        // Note: For production use, prefer the object model approach (beginAnalyze with typed parameters)
        // which returns AnalyzeResult objects that are easier to work with
        PollerFlux<BinaryData, BinaryData> operation
            = client.beginAnalyze("prebuilt-documentSearch", requestBody, new RequestOptions());

        System.out.println("File loaded: " + filePath + " (" + String.format("%,d", fileBytes.length) + " bytes)");

        operation.last()
            .flatMap(pollResponse -> {
                // Compare against SUCCESSFULLY_COMPLETED rather than isComplete():
                // isComplete() is also true for FAILED and USER_CANCELLED terminal states,
                // which would previously report success and attempt to fetch a final result
                // for a failed operation.
                if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) {
                    System.out.println("Polling completed successfully");
                    System.out.println("Analysis operation completed with status: " + pollResponse.getStatus());
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(responseData -> {
                // Materialize the payload once; BinaryData may otherwise re-read its source
                // on every toBytes() call.
                byte[] responseBytes = responseData.toBytes();
                System.out.println("Response data size: " + String.format("%,d", responseBytes.length) + " bytes");

                // Verify response data can be converted to string
                String responseString = responseData.toString();
                System.out.println("Response string length: " + String.format("%,d", responseString.length()) + " characters");

                // Verify response is valid JSON format
                try {
                    ObjectMapper mapper = new ObjectMapper();
                    mapper.readTree(responseBytes);
                    System.out.println("Response is valid JSON format");
                } catch (Exception ex) {
                    System.err.println("Response data is not valid JSON: " + ex.getMessage());
                }

                System.out.println("Raw JSON analysis operation completed successfully");

                // BEGIN:ContentUnderstandingParseRawJsonAsync
                // Parse the raw JSON response
                // (A new ObjectMapper is created here deliberately so this docs snippet stays self-contained.)
                try {
                    ObjectMapper mapper = new ObjectMapper();
                    JsonNode jsonNode = mapper.readTree(responseData.toBytes());

                    // Pretty-print the JSON
                    String prettyJson = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);

                    // Create output directory if it doesn't exist
                    Path outputDir = Paths.get("target/sample_output");
                    Files.createDirectories(outputDir);

                    // Save to file
                    String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"));
                    String outputFileName = "analyze_result_" + timestamp + ".json";
                    Path outputPath = outputDir.resolve(outputFileName);
                    Files.write(outputPath, prettyJson.getBytes(StandardCharsets.UTF_8));

                    System.out.println("Raw JSON response saved to: " + outputPath);
                    System.out.println("File size: " + String.format("%,d", prettyJson.length()) + " characters");

                    System.out.println("\nRaw JSON result saved to: " + outputPath);
                    long fileSize = Files.size(outputPath);
                    System.out.println("File size: " + String.format("%,d", fileSize) + " bytes");
                } catch (IOException e) {
                    System.err.println("Error saving JSON file: " + e.getMessage());
                    e.printStackTrace();
                }
                // END:ContentUnderstandingParseRawJsonAsync
            })
            .doOnError(error -> {
                System.err.println("Error occurred: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );
        // END:ContentUnderstandingAnalyzeReturnRawJsonAsync

        // The .subscribe() creation is not a blocking call. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.MINUTES.sleep(1);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.AudioVisualContent;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.SyncPoller;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

/**
 * Sample demonstrates how to retrieve result files (like keyframe images) from video analysis operations.
 */
public class Sample12_GetResultFile {

    /**
     * Entry point: analyzes a remote video with "prebuilt-videoSearch", then retrieves and saves
     * keyframe images through the getResultFile() API, using the operation ID from the poller.
     *
     * @param args unused.
     * @throws IOException if the output directory or keyframe file cannot be written.
     */
    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample12.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        }
        // END: com.azure.ai.contentunderstanding.sample12.buildClient

        System.out.println("Client initialized successfully");

        // BEGIN: com.azure.ai.contentunderstanding.getResultFile
        // For video analysis, use a video URL to get keyframes
        String videoUrl
            = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4";

        // Step 1: Start the video analysis operation
        AnalyzeInput input = new AnalyzeInput();
        input.setUrl(videoUrl);

        // NOTE(review): the SyncPoller's generic type parameters were elided in this snapshot of
        // the file — restore them to match the client's declared beginAnalyze return type.
        SyncPoller poller
            = client.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input));

        System.out.println("Started analysis operation");

        // Wait for completion
        AnalyzeResult result = poller.getFinalResult();
        System.out.println("Analysis completed successfully!");

        // Get the operation ID from the polling result using the getOperationId() convenience method.
        // The operation ID is extracted from the Operation-Location header and can be used with
        // getResultFile() and deleteResult() APIs. poll() here returns the cached terminal
        // response because the poller is already in a terminal state.
        String operationId = poller.poll().getValue().getOperationId();
        System.out.println("Operation ID: " + operationId);

        // END: com.azure.ai.contentunderstanding.getResultFile

        System.out.println("Video URL: " + videoUrl);
        System.out.println("Analysis result contains " + result.getContents().size() + " content(s)");

        // BEGIN: com.azure.ai.contentunderstanding.getResultFile.keyframes
        // Step 2: Get keyframes from video analysis result
        AudioVisualContent videoContent = null;
        for (Object content : result.getContents()) {
            if (content instanceof AudioVisualContent) {
                videoContent = (AudioVisualContent) content;
                break;
            }
        }

        if (videoContent != null
            && videoContent.getKeyFrameTimesMs() != null
            && !videoContent.getKeyFrameTimesMs().isEmpty()) {
            List<Long> keyFrameTimes = videoContent.getKeyFrameTimesMs();
            System.out.println("Total keyframes: " + keyFrameTimes.size());

            // Get the first keyframe
            long firstFrameTimeMs = keyFrameTimes.get(0);
            System.out.println("First keyframe time: " + firstFrameTimeMs + " ms");

            // Construct the keyframe path
            String framePath = "keyframes/" + firstFrameTimeMs;
            System.out.println("Getting result file: " + framePath);

            // Retrieve the keyframe image with retry logic
            // Note: Result files may not be immediately available after analysis completion
            // The service requires additional time for keyframe extraction
            BinaryData fileData = null;
            int maxRetries = 12;
            int retryDelayMs = 10000; // 10 seconds between retries
            for (int attempt = 1; attempt <= maxRetries; attempt++) {
                try {
                    fileData = client.getResultFile(operationId, framePath);
                    break; // Success
                } catch (Exception e) {
                    if (attempt == maxRetries) {
                        throw e; // out of retries — surface the last failure
                    }
                    System.out.println("Attempt " + attempt + " failed: " + e.getMessage());
                    System.out.println("Waiting " + (retryDelayMs / 1000) + " seconds before retry...");
                    try {
                        Thread.sleep(retryDelayMs);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException("Interrupted while waiting for retry", ie);
                    }
                }
            }
            byte[] imageBytes = fileData.toBytes();
            System.out.println("Retrieved keyframe image (" + String.format("%,d", imageBytes.length) + " bytes)");

            // Save the keyframe image
            Path outputDir = Paths.get("target", "sample_output");
            Files.createDirectories(outputDir);
            String outputFileName = "keyframe_" + firstFrameTimeMs + ".jpg";
            Path outputPath = outputDir.resolve(outputFileName);
            Files.write(outputPath, imageBytes);

            System.out.println("Keyframe image saved to: " + outputPath.toAbsolutePath());
            // END: com.azure.ai.contentunderstanding.getResultFile.keyframes

            System.out.println("\n🎬 Keyframe Information:");
            System.out.println("Total keyframes: " + keyFrameTimes.size());

            // Get keyframe statistics
            long lastFrameTimeMs = keyFrameTimes.get(keyFrameTimes.size() - 1);
            double avgFrameInterval = keyFrameTimes.size() > 1
                ? (double) (lastFrameTimeMs - firstFrameTimeMs) / (keyFrameTimes.size() - 1)
                : 0;

            System.out.println("  First keyframe: " + firstFrameTimeMs + " ms ("
                + String.format("%.2f", firstFrameTimeMs / 1000.0) + " seconds)");
            System.out.println("  Last keyframe: " + lastFrameTimeMs + " ms ("
                + String.format("%.2f", lastFrameTimeMs / 1000.0) + " seconds)");
            if (keyFrameTimes.size() > 1) {
                System.out.println("  Average interval: " + String.format("%.2f", avgFrameInterval) + " ms");
            }

            System.out.println("\n📥 File Data Retrieved");

            System.out.println("\nVerifying image data...");
            System.out.println("Image size: " + String.format("%,d", imageBytes.length) + " bytes ("
                + String.format("%.2f", imageBytes.length / 1024.0) + " KB)");

            // Verify image format
            String imageFormat = detectImageFormat(imageBytes);
            System.out.println("Detected image format: " + imageFormat);

            System.out.println("\n💾 Saved File:");
            long fileSize = Files.size(outputPath);
            System.out.println("File saved: " + outputPath.toAbsolutePath());
            System.out.println("File size: " + String.format("%,d", fileSize) + " bytes");

            // Test additional keyframes if available
            if (keyFrameTimes.size() > 1) {
                System.out
                    .println("\nTesting additional keyframes (" + (keyFrameTimes.size() - 1) + " more available)...");
                int middleIndex = keyFrameTimes.size() / 2;
                long middleFrameTimeMs = keyFrameTimes.get(middleIndex);
                String middleFramePath = "keyframes/" + middleFrameTimeMs;

                BinaryData middleFileData = client.getResultFile(operationId, middleFramePath);
                System.out.println(
                    "Successfully retrieved keyframe at index " + middleIndex + " (" + middleFrameTimeMs + " ms)");
                System.out.println(
                    "  Size: " + String.format("%,d", middleFileData.toBytes().length) + " bytes");
            }

            // Summary
            System.out.println("\nKeyframe retrieval completed successfully:");
            System.out.println("  Operation ID: " + operationId);
            System.out.println("  Total keyframes: " + keyFrameTimes.size());
            System.out.println("  First keyframe time: " + firstFrameTimeMs + " ms");
            System.out.println("  Image format: " + imageFormat);
            System.out.println("  Image size: " + String.format("%,d", imageBytes.length) + " bytes");
            System.out.println("  Saved to: " + outputPath.toAbsolutePath());
        } else {
            // No video content (expected for document analysis)
            System.out.println("\nGetResultFile API Usage Example:");
            System.out.println("  For video analysis with keyframes:");
            System.out.println("  1. Analyze video with prebuilt-videoSearch");
            System.out.println("  2. Get keyframe times from AudioVisualContent.getKeyFrameTimesMs()");
            System.out.println("  3. Retrieve keyframes using getResultFile():");
            System.out.println("     BinaryData fileData = client.getResultFile(\"" + operationId
                + "\", \"keyframes/1000\");");
            System.out.println("  4. Save or process the keyframe image");

            System.out.println("Operation ID available for GetResultFile API: " + operationId);
        }
    }

    /**
     * Detect image format from magic bytes.
     *
     * @param imageBytes raw file bytes; may be shorter than any signature.
     * @return "JPEG", "PNG", "GIF", "WebP", or "Unknown".
     */
    private static String detectImageFormat(byte[] imageBytes) {
        if (imageBytes.length < 2) {
            return "Unknown";
        }

        // Check JPEG magic bytes (FF D8)
        if (imageBytes[0] == (byte) 0xFF && imageBytes[1] == (byte) 0xD8) {
            return "JPEG";
        }

        // Check PNG magic bytes (89 50 4E 47)
        if (imageBytes.length >= 4
            && imageBytes[0] == (byte) 0x89
            && imageBytes[1] == 0x50
            && imageBytes[2] == 0x4E
            && imageBytes[3] == 0x47) {
            return "PNG";
        }

        // Check GIF magic bytes (47 49 46)
        if (imageBytes.length >= 3 && imageBytes[0] == 0x47 && imageBytes[1] == 0x49 && imageBytes[2] == 0x46) {
            return "GIF";
        }

        // Check WebP magic bytes: full RIFF signature (52 49 46 46) plus "WEBP" at offset 8
        // (57 45 42 50). Bytes 2-3 were previously unchecked, which could misclassify data
        // that merely starts with "RI" and contains "WEBP" at offset 8.
        if (imageBytes.length >= 12
            && imageBytes[0] == 0x52
            && imageBytes[1] == 0x49
            && imageBytes[2] == 0x46
            && imageBytes[3] == 0x46
            && imageBytes[8] == 0x57
            && imageBytes[9] == 0x45
            && imageBytes[10] == 0x42
            && imageBytes[11] == 0x50) {
            return "WebP";
        }

        return "Unknown";
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.samples;

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.AudioVisualContent;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.PollerFlux;
import com.azure.identity.DefaultAzureCredentialBuilder;
import reactor.core.publisher.Mono;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * Sample demonstrates how to retrieve result files (like keyframe images) from video analysis operations
 * using the async client.
 */
public class Sample12_GetResultFileAsync {

    /**
     * Entry point: analyzes a remote video with "prebuilt-videoSearch" asynchronously, then
     * retrieves and saves keyframe images through the getResultFile() API.
     *
     * @param args unused.
     * @throws IOException declared for parity with the sync sample; file I/O inside the reactive
     *         chain is wrapped in RuntimeException instead.
     */
    public static void main(String[] args) throws IOException {
        // BEGIN: com.azure.ai.contentunderstanding.sample12Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");

        // Build the async client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample12Async.buildClient

        System.out.println("Client initialized successfully");

        // BEGIN: com.azure.ai.contentunderstanding.getResultFileAsync
        // For video analysis, use a video URL to get keyframes
        String videoUrl
            = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4";

        // Step 1: Start the video analysis operation
        AnalyzeInput input = new AnalyzeInput();
        input.setUrl(videoUrl);

        // NOTE(review): the PollerFlux's generic type parameters were elided in this snapshot of
        // the file — restore them to match the client's declared beginAnalyze return type.
        PollerFlux poller
            = client.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input));

        System.out.println("Started analysis operation");

        poller.last()
            .flatMap(pollResponse -> {
                // Compare against SUCCESSFULLY_COMPLETED rather than isComplete():
                // isComplete() is also true for FAILED and USER_CANCELLED terminal states,
                // which would previously report success and attempt to fetch a final result
                // for a failed operation.
                if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) {
                    System.out.println("Polling completed successfully");

                    // Get the operation ID from the polling result using the getOperationId() convenience method
                    // The operation ID is extracted from the Operation-Location header and can be used with
                    // getResultFile() and deleteResult() APIs
                    String operationId = pollResponse.getValue().getOperationId();
                    System.out.println("Operation ID: " + operationId);

                    return pollResponse.getFinalResult()
                        .map(result -> {
                            // Store operationId and result together for use in doOnNext
                            return new java.util.AbstractMap.SimpleEntry<>(operationId, result);
                        });
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(entry -> {
                String operationId = entry.getKey();
                AnalyzeResult result = entry.getValue();

                System.out.println("Analysis completed successfully!");
                System.out.println("Video URL: " + videoUrl);
                System.out.println("Analysis result contains " + result.getContents().size() + " content(s)");

                // BEGIN: com.azure.ai.contentunderstanding.getResultFileAsync.keyframes
                // Step 2: Get keyframes from video analysis result
                AudioVisualContent videoContent = null;
                for (Object content : result.getContents()) {
                    if (content instanceof AudioVisualContent) {
                        videoContent = (AudioVisualContent) content;
                        break;
                    }
                }

                if (videoContent != null
                    && videoContent.getKeyFrameTimesMs() != null
                    && !videoContent.getKeyFrameTimesMs().isEmpty()) {
                    List<Long> keyFrameTimes = videoContent.getKeyFrameTimesMs();
                    System.out.println("Total keyframes: " + keyFrameTimes.size());

                    // Get the first keyframe
                    long firstFrameTimeMs = keyFrameTimes.get(0);
                    System.out.println("First keyframe time: " + firstFrameTimeMs + " ms");

                    // Construct the keyframe path
                    String framePath = "keyframes/" + firstFrameTimeMs;
                    System.out.println("Getting result file: " + framePath);

                    // Retrieve the keyframe image with retry logic
                    // Note: Result files may not be immediately available after analysis completion
                    // The service requires additional time for keyframe extraction
                    BinaryData fileData = null;
                    int maxRetries = 12;
                    int retryDelayMs = 10000; // 10 seconds between retries
                    for (int attempt = 1; attempt <= maxRetries; attempt++) {
                        try {
                            // Blocking here is acceptable for this sample's retry loop; production
                            // code should use Mono.retryWhen with a backoff instead.
                            fileData = client.getResultFile(operationId, framePath).block();
                            break; // Success
                        } catch (Exception e) {
                            if (attempt == maxRetries) {
                                throw e; // out of retries — surface the last failure
                            }
                            System.out.println("Attempt " + attempt + " failed: " + e.getMessage());
                            System.out.println("Waiting " + (retryDelayMs / 1000) + " seconds before retry...");
                            try {
                                Thread.sleep(retryDelayMs);
                            } catch (InterruptedException ie) {
                                Thread.currentThread().interrupt();
                                throw new RuntimeException("Interrupted while waiting for retry", ie);
                            }
                        }
                    }
                    byte[] imageBytes = fileData.toBytes();
                    System.out.println("Retrieved keyframe image (" + String.format("%,d", imageBytes.length) + " bytes)");

                    // Save the keyframe image
                    Path outputDir = Paths.get("target", "sample_output");
                    try {
                        Files.createDirectories(outputDir);
                    } catch (IOException e) {
                        throw new RuntimeException("Failed to create output directory", e);
                    }
                    String outputFileName = "keyframe_" + firstFrameTimeMs + ".jpg";
                    Path outputPath = outputDir.resolve(outputFileName);
                    try {
                        Files.write(outputPath, imageBytes);
                    } catch (IOException e) {
                        throw new RuntimeException("Failed to write keyframe image", e);
                    }

                    System.out.println("Keyframe image saved to: " + outputPath.toAbsolutePath());
                    // END: com.azure.ai.contentunderstanding.getResultFileAsync.keyframes

                    System.out.println("\n🎬 Keyframe Information:");
                    System.out.println("Total keyframes: " + keyFrameTimes.size());

                    // Get keyframe statistics
                    long lastFrameTimeMs = keyFrameTimes.get(keyFrameTimes.size() - 1);
                    double avgFrameInterval = keyFrameTimes.size() > 1
                        ? (double) (lastFrameTimeMs - firstFrameTimeMs) / (keyFrameTimes.size() - 1)
                        : 0;

                    System.out.println("  First keyframe: " + firstFrameTimeMs + " ms ("
                        + String.format("%.2f", firstFrameTimeMs / 1000.0) + " seconds)");
                    System.out.println("  Last keyframe: " + lastFrameTimeMs + " ms ("
                        + String.format("%.2f", lastFrameTimeMs / 1000.0) + " seconds)");
                    if (keyFrameTimes.size() > 1) {
                        System.out.println("  Average interval: " + String.format("%.2f", avgFrameInterval) + " ms");
                    }

                    System.out.println("\n📥 File Data Retrieved");

                    System.out.println("\nVerifying image data...");
                    System.out.println("Image size: " + String.format("%,d", imageBytes.length) + " bytes ("
                        + String.format("%.2f", imageBytes.length / 1024.0) + " KB)");

                    // Verify image format
                    String imageFormat = detectImageFormat(imageBytes);
                    System.out.println("Detected image format: " + imageFormat);

                    System.out.println("\n💾 Saved File:");
                    long fileSize;
                    try {
                        fileSize = Files.size(outputPath);
                    } catch (IOException e) {
                        throw new RuntimeException("Failed to get file size", e);
                    }
                    System.out.println("File saved: " + outputPath.toAbsolutePath());
                    System.out.println("File size: " + String.format("%,d", fileSize) + " bytes");

                    // Test additional keyframes if available
                    if (keyFrameTimes.size() > 1) {
                        System.out.println("\nTesting additional keyframes (" + (keyFrameTimes.size() - 1)
                            + " more available)...");
                        int middleIndex = keyFrameTimes.size() / 2;
                        long middleFrameTimeMs = keyFrameTimes.get(middleIndex);
                        String middleFramePath = "keyframes/" + middleFrameTimeMs;

                        // Note: Using .block() in retry loops is acceptable per skill documentation
                        BinaryData middleFileData = client.getResultFile(operationId, middleFramePath).block();
                        System.out.println(
                            "Successfully retrieved keyframe at index " + middleIndex + " (" + middleFrameTimeMs + " ms)");
                        System.out.println("  Size: " + String.format("%,d", middleFileData.toBytes().length) + " bytes");
                    }

                    // Summary
                    System.out.println("\nKeyframe retrieval completed successfully:");
                    System.out.println("  Operation ID: " + operationId);
                    System.out.println("  Total keyframes: " + keyFrameTimes.size());
                    System.out.println("  First keyframe time: " + firstFrameTimeMs + " ms");
                    System.out.println("  Image format: " + imageFormat);
                    System.out.println("  Image size: " + String.format("%,d", imageBytes.length) + " bytes");
                    System.out.println("  Saved to: " + outputPath.toAbsolutePath());
                } else {
                    // No video content (expected for document analysis)
                    System.out.println("\nGetResultFile API Usage Example:");
                    System.out.println("  For video analysis with keyframes:");
                    System.out.println("  1. Analyze video with prebuilt-videoSearch");
                    System.out.println("  2. Get keyframe times from AudioVisualContent.getKeyFrameTimesMs()");
                    System.out.println("  3. Retrieve keyframes using getResultFile():");
                    System.out.println("     Mono fileData = client.getResultFile(\"" + operationId
                        + "\", \"keyframes/1000\");");
                    System.out.println("  4. Save or process the keyframe image");

                    System.out.println("Operation ID available for GetResultFile API: " + operationId);
                }
            })
            .doOnError(error -> {
                System.err.println("Error occurred: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );
        // END: com.azure.ai.contentunderstanding.getResultFileAsync

        // The .subscribe() creation is not a blocking call. For the purpose of this example,
        // we sleep the thread so the program does not end before the async operations complete.
        try {
            TimeUnit.MINUTES.sleep(2);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * Detect image format from magic bytes.
     *
     * @param imageBytes raw file bytes; may be shorter than any signature.
     * @return "JPEG", "PNG", "GIF", "WebP", or "Unknown".
     */
    private static String detectImageFormat(byte[] imageBytes) {
        if (imageBytes.length < 2) {
            return "Unknown";
        }

        // Check JPEG magic bytes (FF D8)
        if (imageBytes[0] == (byte) 0xFF && imageBytes[1] == (byte) 0xD8) {
            return "JPEG";
        }

        // Check PNG magic bytes (89 50 4E 47)
        if (imageBytes.length >= 4
            && imageBytes[0] == (byte) 0x89
            && imageBytes[1] == 0x50
            && imageBytes[2] == 0x4E
            && imageBytes[3] == 0x47) {
            return "PNG";
        }

        // Check GIF magic bytes (47 49 46)
        if (imageBytes.length >= 3 && imageBytes[0] == 0x47 && imageBytes[1] == 0x49 && imageBytes[2] == 0x46) {
            return "GIF";
        }

        // Check WebP magic bytes: full RIFF signature (52 49 46 46) plus "WEBP" at offset 8
        // (57 45 42 50). Bytes 2-3 were previously unchecked, which could misclassify data
        // that merely starts with "RI" and contains "WEBP" at offset 8.
        if (imageBytes.length >= 12
            && imageBytes[0] == 0x52
            && imageBytes[1] == 0x49
            && imageBytes[2] == 0x46
            && imageBytes[3] == 0x46
            && imageBytes[8] == 0x57
            && imageBytes[9] == 0x45
            && imageBytes[10] == 0x42
            && imageBytes[11] == 0x50) {
            return "WebP";
        }

        return "Unknown";
    }
}
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.Arrays; +import java.util.Collections; + +/** + * Sample demonstrates how to delete analysis results after they are no longer needed. + */ +public class Sample13_DeleteResult { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample13.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample13.buildClient + + System.out.println("Client initialized successfully"); + + // BEGIN: com.azure.ai.contentunderstanding.deleteResult + // Step 1: Analyze a document + String documentUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + SyncPoller poller + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + // Wait for operation to complete + System.out.println("Started analysis operation"); + + // Wait for completion + AnalyzeResult result = poller.getFinalResult(); + System.out.println("Analysis completed successfully!"); + + // Get the operation ID using the getOperationId() convenience method + // This ID is extracted from the Operation-Location header and is needed for deleteResult() + String operationId = poller.poll().getValue().getOperationId(); + System.out.println("Operation ID: " + operationId); + + // Display some sample results using getValue() convenience method + if (result.getContents() != null && !result.getContents().isEmpty()) { + Object firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) firstContent; + java.util.Map fields = docContent.getFields(); + if (fields != null) { + System.out.println("Total fields extracted: " + fields.size()); + ContentField customerNameField = fields.get("CustomerName"); + if (customerNameField != null) { + // Use getValue() instead of casting to StringField + String customerName = (String) customerNameField.getValue(); + System.out.println("Customer Name: " + (customerName != null ? 
customerName : "(not found)")); + } + } + } + } + + // Step 2: Delete the analysis result using the operation ID + // This cleans up the server-side resources (including keyframe images for video analysis) + client.deleteResult(operationId); + System.out.println("Analysis result deleted successfully!"); + // END: com.azure.ai.contentunderstanding.deleteResult + + System.out.println("\nSample completed successfully!"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample13_DeleteResultAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample13_DeleteResultAsync.java new file mode 100644 index 000000000000..fd309b6218e2 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample13_DeleteResultAsync.java @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrates how to delete analysis results after they are no longer needed + * using the async client. + */ +public class Sample13_DeleteResultAsync { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample13Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the async client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample13Async.buildClient + + System.out.println("Client initialized successfully"); + + // BEGIN: com.azure.ai.contentunderstanding.deleteResultAsync + // Step 1: Analyze a document + String documentUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + PollerFlux poller + = client.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + // Wait for operation to complete + System.out.println("Started analysis operation"); + + poller.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + + // Get the operation ID using the getOperationId() convenience method + // This ID is extracted from the Operation-Location header and is needed for deleteResult() + String operationId = pollResponse.getValue().getOperationId(); + System.out.println("Operation ID: " + operationId); + + return pollResponse.getFinalResult() + .map(result -> { + // Store operationId and result together for use in doOnNext + return new java.util.AbstractMap.SimpleEntry<>(operationId, result); + }); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(entry -> { + String operationId = entry.getKey(); + AnalyzeResult result = entry.getValue(); + + System.out.println("Analysis completed successfully!"); + + // Display some sample results using getValue() convenience method + if (result.getContents() != null && !result.getContents().isEmpty()) { + Object firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) firstContent; + java.util.Map 
fields = docContent.getFields(); + if (fields != null) { + System.out.println("Total fields extracted: " + fields.size()); + ContentField customerNameField = fields.get("CustomerName"); + if (customerNameField != null) { + // Use getValue() instead of casting to StringField + String customerName = (String) customerNameField.getValue(); + System.out.println("Customer Name: " + (customerName != null ? customerName : "(not found)")); + } + } + } + } + + // Step 2: Delete the analysis result using the operation ID + // This cleans up the server-side resources (including keyframe images for video analysis) + client.deleteResult(operationId) + .doOnSuccess(v -> System.out.println("Analysis result deleted successfully!")) + .subscribe(); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + System.out.println("\nSample completed successfully!"); + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + // END: com.azure.ai.contentunderstanding.deleteResultAsync + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(10); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzer.java new file mode 100644 index 000000000000..00f7d760ec71 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzer.java @@ -0,0 +1,224 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * Sample demonstrates how to copy an analyzer within the same resource. + * For cross-resource copying, see Sample15_GrantCopyAuth. + */ +public class Sample14_CopyAnalyzer { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample14.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample14.buildClient + + System.out.println("✓ Client initialized successfully with endpoint: " + endpoint); + + // Generate unique analyzer IDs for this test + String sourceAnalyzerId = "test_analyzer_source_" + UUID.randomUUID().toString().replace("-", ""); + String targetAnalyzerId = "test_analyzer_target_" + UUID.randomUUID().toString().replace("-", ""); + + try { + // BEGIN: com.azure.ai.contentunderstanding.copyAnalyzer + // Step 1: Create the source analyzer + ContentAnalyzerConfig sourceConfig = new ContentAnalyzerConfig(); + sourceConfig.setEnableFormula(false); + sourceConfig.setEnableLayout(true); + sourceConfig.setEnableOcr(true); + sourceConfig.setEstimateFieldSourceAndConfidence(true); + sourceConfig.setReturnDetails(true); + + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema sourceFieldSchema = new ContentFieldSchema(); + sourceFieldSchema.setName("company_schema"); + sourceFieldSchema.setDescription("Schema for extracting company information"); + sourceFieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for copying"); + sourceAnalyzer.setConfig(sourceConfig); + 
sourceAnalyzer.setFieldSchema(sourceFieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + Map tags = new HashMap<>(); + tags.put("modelType", "in_development"); + sourceAnalyzer.setTags(tags); + + // Create source analyzer + SyncPoller createPoller + = client.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer, true); + ContentAnalyzer sourceResult = createPoller.getFinalResult(); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + // Step 2: Copy the source analyzer to target + // Note: This copies within the same resource + SyncPoller copyPoller + = client.beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId); + ContentAnalyzer copiedAnalyzer = copyPoller.getFinalResult(); + System.out.println("Analyzer copied to '" + targetAnalyzerId + "' successfully!"); + // END: com.azure.ai.contentunderstanding.copyAnalyzer + + // ========== VERIFICATION: Source Analyzer Creation ========== + System.out.println("\n📋 Source Analyzer Creation Verification:"); + System.out.println(" ✓ Analyzer IDs validated"); + System.out.println(" Source: " + sourceAnalyzerId); + System.out.println(" Target: " + targetAnalyzerId); + System.out.println(" ✓ Source config verified"); + System.out.println(" ✓ Source field schema verified: " + sourceFieldSchema.getName()); + System.out.println(" ✓ company_name field verified"); + System.out.println(" ✓ total_amount field verified"); + System.out.println(" ✓ Source analyzer object verified"); + System.out.println(" ✓ Source analyzer created: " + sourceAnalyzerId); + System.out.println(" ✓ Config preserved in result"); + System.out.println(" ✓ Field schema preserved: " + sourceResult.getFieldSchema().getFields().size() + " fields"); + System.out.println(" ✓ Tags preserved: " + sourceResult.getTags().size() + " tag(s)"); + System.out.println(" ✓ Models preserved: " + sourceResult.getModels().size() + " model(s)"); + + 
System.out.println("\n✅ Source analyzer creation completed:"); + System.out.println(" ID: " + sourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + + // Get the source analyzer to verify retrieval + ContentAnalyzer sourceAnalyzerInfo = client.getAnalyzer(sourceAnalyzerId); + + System.out.println("\n📋 Source Analyzer Retrieval Verification:"); + System.out.println(" ✓ Source analyzer retrieved successfully"); + System.out.println(" Description: " + sourceAnalyzerInfo.getDescription()); + System.out.println(" Tags: " + String.join(", ", + sourceAnalyzerInfo.getTags() + .entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .toArray(String[]::new))); + + // ========== VERIFICATION: Analyzer Copy Operation ========== + System.out.println("\n📋 Analyzer Copy Verification:"); + System.out.println(" ✓ Copy operation completed"); + System.out.println(" ✓ Base properties preserved"); + System.out.println(" Base analyzer ID: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: '" + copiedAnalyzer.getDescription() + "'"); + System.out.println(" ✓ Field schema structure preserved"); + System.out.println(" Schema: " + copiedAnalyzer.getFieldSchema().getName()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + + ContentFieldDefinition copiedCompanyField = copiedAnalyzer.getFieldSchema().getFields().get("company_name"); + System.out.println( + " ✓ company_name field: " + copiedCompanyField.getType() + " / " + copiedCompanyField.getMethod()); + + ContentFieldDefinition copiedAmountField = copiedAnalyzer.getFieldSchema().getFields().get("total_amount"); + System.out.println( + " ✓ total_amount field: " + copiedAmountField.getType() + " / " + 
copiedAmountField.getMethod()); + + System.out.println(" ✓ Tags preserved: " + copiedAnalyzer.getTags().size() + " tag(s)"); + System.out.println(" modelType=" + copiedAnalyzer.getTags().get("modelType")); + + System.out.println(" ✓ Config preserved"); + System.out.println(" EnableLayout: " + copiedAnalyzer.getConfig().isEnableLayout()); + System.out.println(" EnableOcr: " + copiedAnalyzer.getConfig().isEnableOcr()); + + if (copiedAnalyzer.getModels().containsKey("completion")) { + System.out.println(" ✓ Models preserved: " + copiedAnalyzer.getModels().size() + " model(s)"); + System.out.println(" completion=" + copiedAnalyzer.getModels().get("completion")); + } + + // Verify the copied analyzer via Get operation + ContentAnalyzer verifiedCopy = client.getAnalyzer(targetAnalyzerId); + + System.out.println("\n📋 Copied Analyzer Retrieval Verification:"); + System.out.println(" ✓ Copied analyzer verified via retrieval"); + + // Summary + String separator = new String(new char[60]).replace("\0", "═"); + System.out.println("\n" + separator); + System.out.println("✅ ANALYZER COPY VERIFICATION COMPLETED SUCCESSFULLY"); + System.out.println(separator); + System.out.println("Source Analyzer:"); + System.out.println(" ID: " + sourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Description: " + sourceResult.getDescription()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + System.out.println("\nTarget Analyzer (Copied):"); + System.out.println(" ID: " + targetAnalyzerId); + System.out.println(" Base: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: " + copiedAnalyzer.getDescription()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + 
copiedAnalyzer.getTags().size()); + System.out.println(" Models: " + copiedAnalyzer.getModels().size()); + System.out.println("\n✅ All properties successfully copied and verified!"); + System.out.println(separator); + + } catch (Exception e) { + System.err.println("Error: " + e.getMessage()); + e.printStackTrace(); + } finally { + // Cleanup: Delete the analyzers + try { + client.deleteAnalyzer(sourceAnalyzerId); + System.out.println("\nSource analyzer deleted: " + sourceAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete source analyzer (may not exist): " + e.getMessage()); + } + + try { + client.deleteAnalyzer(targetAnalyzerId); + System.out.println("Target analyzer deleted: " + targetAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete target analyzer (may not exist): " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzerAsync.java new file mode 100644 index 000000000000..89bdd5f4c4bb --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample14_CopyAnalyzerAsync.java @@ -0,0 +1,284 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrates how to copy an analyzer within the same resource using the async client. + * For cross-resource copying, see Sample15_GrantCopyAuthAsync. + */ +public class Sample14_CopyAnalyzerAsync { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample14Async.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + + // Build the async client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingAsyncClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + // END: com.azure.ai.contentunderstanding.sample14Async.buildClient + + System.out.println("✓ Client initialized successfully with endpoint: " + endpoint); + + // Generate unique analyzer IDs for this test + String sourceAnalyzerId = "test_analyzer_source_" + UUID.randomUUID().toString().replace("-", ""); + String targetAnalyzerId = "test_analyzer_target_" + UUID.randomUUID().toString().replace("-", ""); + + String finalSourceAnalyzerId = sourceAnalyzerId; // For use in lambda + String finalTargetAnalyzerId = targetAnalyzerId; // For use in lambda + + // BEGIN: com.azure.ai.contentunderstanding.copyAnalyzerAsync + // Step 1: Create the source analyzer + ContentAnalyzerConfig sourceConfig = new ContentAnalyzerConfig(); + sourceConfig.setEnableFormula(false); + sourceConfig.setEnableLayout(true); + sourceConfig.setEnableOcr(true); + sourceConfig.setEstimateFieldSourceAndConfidence(true); + sourceConfig.setReturnDetails(true); + + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema sourceFieldSchema = new ContentFieldSchema(); + sourceFieldSchema.setName("company_schema"); + sourceFieldSchema.setDescription("Schema for extracting company information"); + sourceFieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + 
sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for copying"); + sourceAnalyzer.setConfig(sourceConfig); + sourceAnalyzer.setFieldSchema(sourceFieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + Map tags = new HashMap<>(); + tags.put("modelType", "in_development"); + sourceAnalyzer.setTags(tags); + + // Create source analyzer using reactive pattern + PollerFlux createPoller + = client.beginCreateAnalyzer(finalSourceAnalyzerId, sourceAnalyzer, true); + + createPoller.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(sourceResult -> { + System.out.println("Source analyzer '" + finalSourceAnalyzerId + "' created successfully!"); + }) + .flatMap(sourceResult -> { + // Step 2: Copy the source analyzer to target + // Note: This copies within the same resource + PollerFlux copyPoller + = client.beginCopyAnalyzer(finalTargetAnalyzerId, finalSourceAnalyzerId); + + return copyPoller.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Copy polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Copy polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .map(copiedAnalyzer -> { + System.out.println("Analyzer copied to '" + finalTargetAnalyzerId + "' successfully!"); + // Store both results for use in doOnNext + return new java.util.AbstractMap.SimpleEntry<>(sourceResult, copiedAnalyzer); + }); + }) + .doOnNext(entry -> { + ContentAnalyzer sourceResult = entry.getKey(); + ContentAnalyzer copiedAnalyzer = 
entry.getValue(); + + // ========== VERIFICATION: Source Analyzer Creation ========== + System.out.println("\n📋 Source Analyzer Creation Verification:"); + System.out.println(" ✓ Analyzer IDs validated"); + System.out.println(" Source: " + finalSourceAnalyzerId); + System.out.println(" Target: " + finalTargetAnalyzerId); + System.out.println(" ✓ Source config verified"); + System.out.println(" ✓ Source field schema verified: " + sourceFieldSchema.getName()); + System.out.println(" ✓ company_name field verified"); + System.out.println(" ✓ total_amount field verified"); + System.out.println(" ✓ Source analyzer object verified"); + System.out.println(" ✓ Source analyzer created: " + finalSourceAnalyzerId); + System.out.println(" ✓ Config preserved in result"); + System.out.println(" ✓ Field schema preserved: " + sourceResult.getFieldSchema().getFields().size() + " fields"); + System.out.println(" ✓ Tags preserved: " + sourceResult.getTags().size() + " tag(s)"); + System.out.println(" ✓ Models preserved: " + sourceResult.getModels().size() + " model(s)"); + + System.out.println("\n✅ Source analyzer creation completed:"); + System.out.println(" ID: " + finalSourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + + // ========== VERIFICATION: Analyzer Copy Operation ========== + System.out.println("\n📋 Analyzer Copy Verification:"); + System.out.println(" ✓ Copy operation completed"); + System.out.println(" ✓ Base properties preserved"); + System.out.println(" Base analyzer ID: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: '" + copiedAnalyzer.getDescription() + "'"); + System.out.println(" ✓ Field schema structure preserved"); + System.out.println(" Schema: " + 
copiedAnalyzer.getFieldSchema().getName()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + + ContentFieldDefinition copiedCompanyField = copiedAnalyzer.getFieldSchema().getFields().get("company_name"); + System.out.println(" ✓ company_name field: " + copiedCompanyField.getType() + " / " + + copiedCompanyField.getMethod()); + + ContentFieldDefinition copiedAmountField = copiedAnalyzer.getFieldSchema().getFields().get("total_amount"); + System.out.println(" ✓ total_amount field: " + copiedAmountField.getType() + " / " + + copiedAmountField.getMethod()); + + System.out.println(" ✓ Tags preserved: " + copiedAnalyzer.getTags().size() + " tag(s)"); + System.out.println(" modelType=" + copiedAnalyzer.getTags().get("modelType")); + + System.out.println(" ✓ Config preserved"); + System.out.println(" EnableLayout: " + copiedAnalyzer.getConfig().isEnableLayout()); + System.out.println(" EnableOcr: " + copiedAnalyzer.getConfig().isEnableOcr()); + + if (copiedAnalyzer.getModels().containsKey("completion")) { + System.out.println(" ✓ Models preserved: " + copiedAnalyzer.getModels().size() + " model(s)"); + System.out.println(" completion=" + copiedAnalyzer.getModels().get("completion")); + } + + // Summary + String separator = new String(new char[60]).replace("\0", "═"); + System.out.println("\n" + separator); + System.out.println("✅ ANALYZER COPY VERIFICATION COMPLETED SUCCESSFULLY"); + System.out.println(separator); + System.out.println("Source Analyzer:"); + System.out.println(" ID: " + finalSourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Description: " + sourceResult.getDescription()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + System.out.println("\nTarget Analyzer (Copied):"); + 
System.out.println(" ID: " + finalTargetAnalyzerId); + System.out.println(" Base: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: " + copiedAnalyzer.getDescription()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + copiedAnalyzer.getTags().size()); + System.out.println(" Models: " + copiedAnalyzer.getModels().size()); + System.out.println("\n✅ All properties successfully copied and verified!"); + System.out.println(separator); + }) + .then(client.getAnalyzer(finalSourceAnalyzerId)) + .doOnNext(sourceAnalyzerInfo -> { + System.out.println("\n📋 Source Analyzer Retrieval Verification:"); + System.out.println(" ✓ Source analyzer retrieved successfully"); + System.out.println(" Description: " + sourceAnalyzerInfo.getDescription()); + System.out.println(" Tags: " + String.join(", ", + sourceAnalyzerInfo.getTags() + .entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .toArray(String[]::new))); + }) + .then(client.getAnalyzer(finalTargetAnalyzerId)) + .doOnNext(verifiedCopy -> { + System.out.println("\n📋 Copied Analyzer Retrieval Verification:"); + System.out.println(" ✓ Copied analyzer verified via retrieval"); + }) + .then(Mono.fromRunnable(() -> { + // Cleanup: Delete the analyzers + System.out.println("\nCleaning up analyzers..."); + })) + .then(client.deleteAnalyzer(finalSourceAnalyzerId) + .onErrorResume(e -> { + System.out.println("Note: Failed to delete source analyzer (may not exist): " + e.getMessage()); + return Mono.empty(); + }) + .doOnSuccess(v -> System.out.println("Source analyzer deleted: " + finalSourceAnalyzerId)) + ) + .then(client.deleteAnalyzer(finalTargetAnalyzerId) + .onErrorResume(e -> { + System.out.println("Note: Failed to delete target analyzer (may not exist): " + e.getMessage()); + return Mono.empty(); + }) + .doOnSuccess(v -> System.out.println("Target analyzer deleted: " + finalTargetAnalyzerId)) + ) + .doOnError(error -> 
{ + System.err.println("Error: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + // Still try to cleanup + client.deleteAnalyzer(finalSourceAnalyzerId) + .onErrorResume(e -> Mono.empty()) + .subscribe(); + client.deleteAnalyzer(finalTargetAnalyzerId) + .onErrorResume(e -> Mono.empty()) + .subscribe(); + System.exit(1); + } + ); + // END: com.azure.ai.contentunderstanding.copyAnalyzerAsync + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. + try { + TimeUnit.SECONDS.sleep(60); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuth.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuth.java new file mode 100644 index 000000000000..1212ab0fea4d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuth.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.CopyAuthorization; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.HashMap; +import java.util.Map; + +/** + * Sample demonstrates how to grant copy authorization and copy an analyzer from a source + * Microsoft Foundry resource to a target Microsoft Foundry resource (cross-resource copying). + * + *
+ * <p>For same-resource copying, see Sample14_CopyAnalyzer.</p>
+ *
+ * <p>Required environment variables:</p>
+ * <ul>
+ *   <li>CONTENTUNDERSTANDING_ENDPOINT: Source resource endpoint</li>
+ *   <li>CONTENTUNDERSTANDING_KEY (optional): API key for source resource</li>
+ *   <li>CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID: Azure resource ID of the source resource</li>
+ *   <li>CONTENTUNDERSTANDING_SOURCE_REGION: Region of the source resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_ENDPOINT: Endpoint of the target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_KEY (optional): API key for target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_RESOURCE_ID: Azure resource ID of the target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_REGION: Region of the target resource</li>
+ * </ul>
+ *
+ * <p>Note: If API keys are not provided, DefaultAzureCredential will be used.
+ * Cross-resource copying with DefaultAzureCredential requires the 'Cognitive Services User' role
+ * on both the source and target resources.</p>
+ */ +public class Sample15_GrantCopyAuth { + + public static void main(String[] args) { + // Get configuration from environment variables + String sourceEndpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String sourceKey = System.getenv("CONTENTUNDERSTANDING_KEY"); + String sourceResourceId = System.getenv("CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID"); + String sourceRegion = System.getenv("CONTENTUNDERSTANDING_SOURCE_REGION"); + String targetEndpoint = System.getenv("CONTENTUNDERSTANDING_TARGET_ENDPOINT"); + String targetKey = System.getenv("CONTENTUNDERSTANDING_TARGET_KEY"); + String targetResourceId = System.getenv("CONTENTUNDERSTANDING_TARGET_RESOURCE_ID"); + String targetRegion = System.getenv("CONTENTUNDERSTANDING_TARGET_REGION"); + + // Validate required environment variables + if (sourceEndpoint == null || targetEndpoint == null || sourceResourceId == null + || targetResourceId == null || sourceRegion == null || targetRegion == null) { + System.out.println("Cross-resource copying requires the following environment variables:"); + System.out.println(" - CONTENTUNDERSTANDING_ENDPOINT: Source resource endpoint"); + System.out.println(" - CONTENTUNDERSTANDING_KEY (optional): API key for source resource"); + System.out.println(" - CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID: Azure resource ID of the source resource"); + System.out.println(" - CONTENTUNDERSTANDING_SOURCE_REGION: Region of the source resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_ENDPOINT: Endpoint of the target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_KEY (optional): API key for target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_RESOURCE_ID: Azure resource ID of the target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_REGION: Region of the target resource"); + return; + } + + // BEGIN: com.azure.ai.contentunderstanding.grantCopyAuth + // Build source client with appropriate authentication + 
ContentUnderstandingClientBuilder sourceBuilder = new ContentUnderstandingClientBuilder() + .endpoint(sourceEndpoint); + ContentUnderstandingClient sourceClient; + if (sourceKey != null && !sourceKey.trim().isEmpty()) { + sourceClient = sourceBuilder.credential(new AzureKeyCredential(sourceKey)).buildClient(); + } else { + sourceClient = sourceBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + + // Build target client with appropriate authentication + ContentUnderstandingClientBuilder targetBuilder = new ContentUnderstandingClientBuilder() + .endpoint(targetEndpoint); + ContentUnderstandingClient targetClient; + if (targetKey != null && !targetKey.trim().isEmpty()) { + targetClient = targetBuilder.credential(new AzureKeyCredential(targetKey)).buildClient(); + } else { + targetClient = targetBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + + String sourceAnalyzerId = "my_source_analyzer"; + String targetAnalyzerId = "my_target_analyzer"; + + // Step 1: Create the source analyzer + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableLayout(true); + config.setEnableOcr(true); + + Map fields = new HashMap<>(); + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + 
fieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for cross-resource copying"); + sourceAnalyzer.setConfig(config); + sourceAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + SyncPoller createPoller + = sourceClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + ContentAnalyzer sourceResult = createPoller.getFinalResult(); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + try { + // Step 2: Grant copy authorization on source client + CopyAuthorization copyAuth = sourceClient.grantCopyAuthorization( + sourceAnalyzerId, targetResourceId, targetRegion); + + System.out.println("Copy authorization granted successfully!"); + System.out.println(" Target Azure Resource ID: " + copyAuth.getTargetAzureResourceId()); + System.out.println(" Expires at: " + copyAuth.getExpiresAt()); + + // Step 3: Copy analyzer to target resource using target client + SyncPoller copyPoller + = targetClient.beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId, false, + sourceResourceId, sourceRegion); + + ContentAnalyzer targetResult = copyPoller.getFinalResult(); + System.out.println("Target analyzer '" + targetAnalyzerId + "' copied successfully!"); + System.out.println(" Description: " + targetResult.getDescription()); + // END: com.azure.ai.contentunderstanding.grantCopyAuth + + } finally { + // Cleanup: delete both analyzers + try { + sourceClient.deleteAnalyzer(sourceAnalyzerId); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup errors + } + + try { + targetClient.deleteAnalyzer(targetAnalyzerId); + System.out.println("Target analyzer '" + targetAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup 
errors + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuthAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuthAsync.java new file mode 100644 index 000000000000..67050831732b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample15_GrantCopyAuthAsync.java @@ -0,0 +1,226 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.CopyAuthorization; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrates how to grant copy authorization and copy an analyzer from a source + * Microsoft Foundry resource to a target Microsoft Foundry resource (cross-resource copying) + * using the 
async client. + * + *
+ * <p>For same-resource copying, see Sample14_CopyAnalyzerAsync.</p>
+ *
+ * <p>Required environment variables:</p>
+ * <ul>
+ *   <li>CONTENTUNDERSTANDING_ENDPOINT: Source resource endpoint</li>
+ *   <li>CONTENTUNDERSTANDING_KEY (optional): API key for source resource</li>
+ *   <li>CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID: Azure resource ID of the source resource</li>
+ *   <li>CONTENTUNDERSTANDING_SOURCE_REGION: Region of the source resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_ENDPOINT: Endpoint of the target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_KEY (optional): API key for target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_RESOURCE_ID: Azure resource ID of the target resource</li>
+ *   <li>CONTENTUNDERSTANDING_TARGET_REGION: Region of the target resource</li>
+ * </ul>
+ *
+ * <p>Note: If API keys are not provided, DefaultAzureCredential will be used.
+ * Cross-resource copying with DefaultAzureCredential requires the 'Cognitive Services User' role
+ * on both the source and target resources.</p>
+ */ +public class Sample15_GrantCopyAuthAsync { + + public static void main(String[] args) { + // Get configuration from environment variables + String sourceEndpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String sourceKey = System.getenv("CONTENTUNDERSTANDING_KEY"); + String sourceResourceId = System.getenv("CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID"); + String sourceRegion = System.getenv("CONTENTUNDERSTANDING_SOURCE_REGION"); + String targetEndpoint = System.getenv("CONTENTUNDERSTANDING_TARGET_ENDPOINT"); + String targetKey = System.getenv("CONTENTUNDERSTANDING_TARGET_KEY"); + String targetResourceId = System.getenv("CONTENTUNDERSTANDING_TARGET_RESOURCE_ID"); + String targetRegion = System.getenv("CONTENTUNDERSTANDING_TARGET_REGION"); + + // Validate required environment variables + if (sourceEndpoint == null || targetEndpoint == null || sourceResourceId == null + || targetResourceId == null || sourceRegion == null || targetRegion == null) { + System.out.println("Cross-resource copying requires the following environment variables:"); + System.out.println(" - CONTENTUNDERSTANDING_ENDPOINT: Source resource endpoint"); + System.out.println(" - CONTENTUNDERSTANDING_KEY (optional): API key for source resource"); + System.out.println(" - CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID: Azure resource ID of the source resource"); + System.out.println(" - CONTENTUNDERSTANDING_SOURCE_REGION: Region of the source resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_ENDPOINT: Endpoint of the target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_KEY (optional): API key for target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_RESOURCE_ID: Azure resource ID of the target resource"); + System.out.println(" - CONTENTUNDERSTANDING_TARGET_REGION: Region of the target resource"); + return; + } + + // BEGIN: com.azure.ai.contentunderstanding.grantCopyAuthAsync + // Build source async client with appropriate authentication + 
ContentUnderstandingClientBuilder sourceBuilder = new ContentUnderstandingClientBuilder() + .endpoint(sourceEndpoint); + ContentUnderstandingAsyncClient sourceClient; + if (sourceKey != null && !sourceKey.trim().isEmpty()) { + sourceClient = sourceBuilder.credential(new AzureKeyCredential(sourceKey)).buildAsyncClient(); + } else { + sourceClient = sourceBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + + // Build target async client with appropriate authentication + ContentUnderstandingClientBuilder targetBuilder = new ContentUnderstandingClientBuilder() + .endpoint(targetEndpoint); + ContentUnderstandingAsyncClient targetClient; + if (targetKey != null && !targetKey.trim().isEmpty()) { + targetClient = targetBuilder.credential(new AzureKeyCredential(targetKey)).buildAsyncClient(); + } else { + targetClient = targetBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + + String sourceAnalyzerId = "my_source_analyzer"; + String targetAnalyzerId = "my_target_analyzer"; + + // Step 1: Create the source analyzer + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableLayout(true); + config.setEnableOcr(true); + + Map fields = new HashMap<>(); + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for 
extracting company information"); + fieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for cross-resource copying"); + sourceAnalyzer.setConfig(config); + sourceAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + PollerFlux createPoller + = sourceClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + + String finalSourceAnalyzerId = sourceAnalyzerId; // For use in lambda + String finalTargetAnalyzerId = targetAnalyzerId; // For use in lambda + String finalSourceResourceId = sourceResourceId; // For use in lambda + String finalSourceRegion = sourceRegion; // For use in lambda + String finalTargetResourceId = targetResourceId; // For use in lambda + String finalTargetRegion = targetRegion; // For use in lambda + + createPoller.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }) + .doOnNext(sourceResult -> { + System.out.println("Source analyzer '" + finalSourceAnalyzerId + "' created successfully!"); + }) + .then(sourceClient.grantCopyAuthorization(finalSourceAnalyzerId, finalTargetResourceId, finalTargetRegion)) + .doOnNext(copyAuth -> { + System.out.println("Copy authorization granted successfully!"); + System.out.println(" Target Azure Resource ID: " + copyAuth.getTargetAzureResourceId()); + System.out.println(" Expires at: " + copyAuth.getExpiresAt()); + }) + .flatMap(copyAuth -> { + // Step 3: Copy analyzer to target resource using target async client + PollerFlux copyPoller + = targetClient.beginCopyAnalyzer(finalTargetAnalyzerId, 
finalSourceAnalyzerId, false, + finalSourceResourceId, finalSourceRegion); + + return copyPoller.last() + .flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + System.out.println("Copy polling completed successfully"); + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Copy polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }); + }) + .doOnNext(targetResult -> { + System.out.println("Target analyzer '" + finalTargetAnalyzerId + "' copied successfully!"); + System.out.println(" Description: " + targetResult.getDescription()); + // END: com.azure.ai.contentunderstanding.grantCopyAuthAsync + }) + .doFinally(signalType -> { + // Cleanup: delete both analyzers + sourceClient.deleteAnalyzer(finalSourceAnalyzerId) + .onErrorResume(e -> { + System.out.println("Note: Failed to delete source analyzer (may not exist): " + e.getMessage()); + return Mono.empty(); + }) + .doOnSuccess(v -> System.out.println("Source analyzer '" + finalSourceAnalyzerId + "' deleted.")) + .subscribe(); + + targetClient.deleteAnalyzer(finalTargetAnalyzerId) + .onErrorResume(e -> { + System.out.println("Note: Failed to delete target analyzer (may not exist): " + e.getMessage()); + return Mono.empty(); + }) + .doOnSuccess(v -> System.out.println("Target analyzer '" + finalTargetAnalyzerId + "' deleted.")) + .subscribe(); + }) + .doOnError(error -> { + System.err.println("Error occurred: " + error.getMessage()); + error.printStackTrace(); + }) + .subscribe( + result -> { + // Success - operations completed + }, + error -> { + // Error already handled in doOnError + System.exit(1); + } + ); + + // The .subscribe() creation is not a blocking call. For the purpose of this example, + // we sleep the thread so the program does not end before the async operations complete. 
+ try { + TimeUnit.SECONDS.sleep(60); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabels.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabels.java new file mode 100644 index 000000000000..8c737a0f5ca3 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabels.java @@ -0,0 +1,209 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.KnowledgeSource; +import com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +/** + * Sample demonstrates how to create an analyzer with labeled training 
data from Azure Blob Storage. + * + * Required environment variables: + * - CONTENTUNDERSTANDING_ENDPOINT: Azure Content Understanding endpoint URL + * - CONTENTUNDERSTANDING_KEY: Azure Content Understanding API key (optional if using DefaultAzureCredential) + * + * Optional environment variables: + * - TRAINING_DATA_SAS_URL: SAS URL for the container with labeled training data + * If set, the analyzer will be created with labeled data knowledge source. + * If not set, the analyzer will be created without training data (demonstration mode). + */ +public class Sample16_CreateAnalyzerWithLabels { + + public static void main(String[] args) { + // BEGIN: com.azure.ai.contentunderstanding.sample16.buildClient + String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT"); + String key = System.getenv("CONTENTUNDERSTANDING_KEY"); + String sasUrl = System.getenv("TRAINING_DATA_SAS_URL"); + + // Build the client with appropriate authentication + ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint); + + ContentUnderstandingClient client; + if (key != null && !key.trim().isEmpty()) { + // Use API key authentication + client = builder.credential(new AzureKeyCredential(key)).buildClient(); + } else { + // Use default Azure credential (for managed identity, Azure CLI, etc.) 
+ client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + // END: com.azure.ai.contentunderstanding.sample16.buildClient + + System.out.println("Client initialized successfully"); + + String analyzerId = "test_receipt_analyzer_" + UUID.randomUUID().toString().replace("-", ""); + + try { + // BEGIN: com.azure.ai.contentunderstanding.createAnalyzerWithLabels + // Step 1: Define field schema for receipt extraction + Map fields = new HashMap<>(); + + // MerchantName field + ContentFieldDefinition merchantNameField = new ContentFieldDefinition(); + merchantNameField.setType(ContentFieldType.STRING); + merchantNameField.setMethod(GenerationMethod.EXTRACT); + merchantNameField.setDescription("Name of the merchant"); + fields.put("MerchantName", merchantNameField); + + // Items array field - define item structure + ContentFieldDefinition itemDefinition = new ContentFieldDefinition(); + itemDefinition.setType(ContentFieldType.OBJECT); + itemDefinition.setMethod(GenerationMethod.EXTRACT); + itemDefinition.setDescription("Individual item details"); + + Map itemProperties = new HashMap<>(); + + ContentFieldDefinition quantityField = new ContentFieldDefinition(); + quantityField.setType(ContentFieldType.STRING); + quantityField.setMethod(GenerationMethod.EXTRACT); + quantityField.setDescription("Quantity of the item"); + itemProperties.put("Quantity", quantityField); + + ContentFieldDefinition nameField = new ContentFieldDefinition(); + nameField.setType(ContentFieldType.STRING); + nameField.setMethod(GenerationMethod.EXTRACT); + nameField.setDescription("Name of the item"); + itemProperties.put("Name", nameField); + + ContentFieldDefinition priceField = new ContentFieldDefinition(); + priceField.setType(ContentFieldType.STRING); + priceField.setMethod(GenerationMethod.EXTRACT); + priceField.setDescription("Price of the item"); + itemProperties.put("Price", priceField); + + itemDefinition.setProperties(itemProperties); + + // Items array 
field + ContentFieldDefinition itemsField = new ContentFieldDefinition(); + itemsField.setType(ContentFieldType.ARRAY); + itemsField.setMethod(GenerationMethod.GENERATE); + itemsField.setDescription("List of items purchased"); + itemsField.setItemDefinition(itemDefinition); + fields.put("Items", itemsField); + + // Total field + ContentFieldDefinition totalField = new ContentFieldDefinition(); + totalField.setType(ContentFieldType.STRING); + totalField.setMethod(GenerationMethod.EXTRACT); + totalField.setDescription("Total amount"); + fields.put("Total", totalField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("receipt_schema"); + fieldSchema.setDescription("Schema for receipt extraction with items"); + fieldSchema.setFields(fields); + + // Step 2: Create labeled data knowledge source (optional, based on environment variable) + List knowledgeSources = new ArrayList<>(); + if (sasUrl != null && !sasUrl.trim().isEmpty()) { + LabeledDataKnowledgeSource knowledgeSource = new LabeledDataKnowledgeSource() + .setContainerUrl(sasUrl); + knowledgeSources.add(knowledgeSource); + System.out.println("Using labeled training data from: " + sasUrl.substring(0, Math.min(50, sasUrl.length())) + "..."); + } else { + System.out.println("No TRAINING_DATA_SAS_URL set, creating analyzer without labeled training data"); + } + + // Step 3: Create analyzer (with or without labeled data) + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new ContentAnalyzer() + .setBaseAnalyzerId("prebuilt-document") + .setDescription("Receipt analyzer with labeled training data") + .setConfig(new ContentAnalyzerConfig() + .setEnableLayout(true) + .setEnableOcr(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + if (!knowledgeSources.isEmpty()) { + analyzer.setKnowledgeSources(knowledgeSources); + } + + // For demonstration without actual training data, 
create analyzer without knowledge sources + SyncPoller createPoller + = client.beginCreateAnalyzer(analyzerId, analyzer, true); + ContentAnalyzer result = createPoller.getFinalResult(); + + System.out.println("Analyzer created: " + analyzerId); + System.out.println(" Description: " + result.getDescription()); + System.out.println(" Base analyzer: " + result.getBaseAnalyzerId()); + System.out.println(" Fields: " + result.getFieldSchema().getFields().size()); + // END: com.azure.ai.contentunderstanding.createAnalyzerWithLabels + + // Verify analyzer creation + System.out.println("\n📋 Analyzer Creation Verification:"); + System.out.println("Analyzer created successfully"); + + // Verify field schema + Map resultFields = result.getFieldSchema().getFields(); + System.out.println("Field schema verified:"); + System.out.println(" MerchantName: String (Extract)"); + System.out.println(" Items: Array of Objects (Generate)"); + System.out.println(" - Quantity, Name, Price"); + System.out.println(" Total: String (Extract)"); + + ContentFieldDefinition itemsFieldResult = resultFields.get("Items"); + System.out.println("Items field verified:"); + System.out.println(" Type: " + itemsFieldResult.getType()); + System.out.println(" Item properties: " + itemsFieldResult.getItemDefinition().getProperties().size()); + + // Display API pattern information + System.out.println("\n📚 CreateAnalyzerWithLabels API Pattern:"); + System.out.println(" 1. Define field schema with nested structures (arrays, objects)"); + System.out.println(" 2. Upload training data to Azure Blob Storage:"); + System.out.println(" - Documents: receipt1.pdf, receipt2.pdf, ..."); + System.out.println(" - Labels: receipt1.pdf.labels.json, receipt2.pdf.labels.json, ..."); + System.out.println(" - OCR: receipt1.pdf.result.json, receipt2.pdf.result.json, ..."); + System.out.println(" 3. Create LabeledDataKnowledgeSource with storage SAS URL"); + System.out.println(" 4. 
Create analyzer with field schema and knowledge sources"); + System.out.println(" 5. Use analyzer for document analysis"); + + System.out.println("\n✅ CreateAnalyzerWithLabels pattern demonstration completed"); + System.out.println(" Note: This sample demonstrates the API pattern."); + System.out.println(" For actual training, provide TRAINING_DATA_SAS_URL with labeled data."); + + } catch (Exception e) { + System.err.println("Error: " + e.getMessage()); + e.printStackTrace(); + } finally { + // Cleanup + try { + client.deleteAnalyzer(analyzerId); + System.out.println("\nAnalyzer deleted: " + analyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete analyzer: " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabelsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabelsAsync.java new file mode 100644 index 000000000000..9a0a54d98eca --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/java/com/azure/ai/contentunderstanding/samples/Sample16_CreateAnalyzerWithLabelsAsync.java @@ -0,0 +1,243 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.KnowledgeSource; +import com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * Sample demonstrates how to create an analyzer with labeled training data from Azure Blob Storage + * using the async client. + * + * Required environment variables: + * - CONTENTUNDERSTANDING_ENDPOINT: Azure Content Understanding endpoint URL + * - CONTENTUNDERSTANDING_KEY: Azure Content Understanding API key (optional if using DefaultAzureCredential) + * + * Optional environment variables: + * - TRAINING_DATA_SAS_URL: SAS URL for the container with labeled training data + * If set, the analyzer will be created with labeled data knowledge source. + * If not set, the analyzer will be created without training data (demonstration mode). 
 */
public class Sample16_CreateAnalyzerWithLabelsAsync {

    public static void main(String[] args) {
        // BEGIN: com.azure.ai.contentunderstanding.sample16Async.buildClient
        String endpoint = System.getenv("CONTENTUNDERSTANDING_ENDPOINT");
        String key = System.getenv("CONTENTUNDERSTANDING_KEY");
        String sasUrl = System.getenv("TRAINING_DATA_SAS_URL");

        // Build the async client with appropriate authentication
        ContentUnderstandingClientBuilder builder = new ContentUnderstandingClientBuilder().endpoint(endpoint);

        ContentUnderstandingAsyncClient client;
        if (key != null && !key.trim().isEmpty()) {
            // Use API key authentication
            client = builder.credential(new AzureKeyCredential(key)).buildAsyncClient();
        } else {
            // Use default Azure credential (for managed identity, Azure CLI, etc.)
            client = builder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient();
        }
        // END: com.azure.ai.contentunderstanding.sample16Async.buildClient

        System.out.println("Client initialized successfully");

        // Random suffix keeps repeated runs from colliding on the same analyzer id.
        String analyzerId = "test_receipt_analyzer_" + UUID.randomUUID().toString().replace("-", "");
        String finalAnalyzerId = analyzerId; // Effectively-final copy for capture in the lambdas below

        // BEGIN: com.azure.ai.contentunderstanding.createAnalyzerWithLabelsAsync
        // Step 1: Define field schema for receipt extraction
        // NOTE(review): type arguments appear to have been lost in this copy (Map/List/PollerFlux are raw
        // here) — presumably Map<String, ContentFieldDefinition> etc.; confirm against the sync sample.
        Map fields = new HashMap<>();

        // MerchantName field
        ContentFieldDefinition merchantNameField = new ContentFieldDefinition();
        merchantNameField.setType(ContentFieldType.STRING);
        merchantNameField.setMethod(GenerationMethod.EXTRACT);
        merchantNameField.setDescription("Name of the merchant");
        fields.put("MerchantName", merchantNameField);

        // Items array field - define item structure
        ContentFieldDefinition itemDefinition = new ContentFieldDefinition();
        itemDefinition.setType(ContentFieldType.OBJECT);
        itemDefinition.setMethod(GenerationMethod.EXTRACT);
        itemDefinition.setDescription("Individual item details");

        Map itemProperties = new HashMap<>();

        ContentFieldDefinition quantityField = new ContentFieldDefinition();
        quantityField.setType(ContentFieldType.STRING);
        quantityField.setMethod(GenerationMethod.EXTRACT);
        quantityField.setDescription("Quantity of the item");
        itemProperties.put("Quantity", quantityField);

        ContentFieldDefinition nameField = new ContentFieldDefinition();
        nameField.setType(ContentFieldType.STRING);
        nameField.setMethod(GenerationMethod.EXTRACT);
        nameField.setDescription("Name of the item");
        itemProperties.put("Name", nameField);

        ContentFieldDefinition priceField = new ContentFieldDefinition();
        priceField.setType(ContentFieldType.STRING);
        priceField.setMethod(GenerationMethod.EXTRACT);
        priceField.setDescription("Price of the item");
        itemProperties.put("Price", priceField);

        itemDefinition.setProperties(itemProperties);

        // Items array field
        ContentFieldDefinition itemsField = new ContentFieldDefinition();
        itemsField.setType(ContentFieldType.ARRAY);
        itemsField.setMethod(GenerationMethod.GENERATE);
        itemsField.setDescription("List of items purchased");
        itemsField.setItemDefinition(itemDefinition);
        fields.put("Items", itemsField);

        // Total field
        ContentFieldDefinition totalField = new ContentFieldDefinition();
        totalField.setType(ContentFieldType.STRING);
        totalField.setMethod(GenerationMethod.EXTRACT);
        totalField.setDescription("Total amount");
        fields.put("Total", totalField);

        ContentFieldSchema fieldSchema = new ContentFieldSchema();
        fieldSchema.setName("receipt_schema");
        fieldSchema.setDescription("Schema for receipt extraction with items");
        fieldSchema.setFields(fields);

        // Step 2: Create labeled data knowledge source (optional, based on environment variable)
        List knowledgeSources = new ArrayList<>();
        if (sasUrl != null && !sasUrl.trim().isEmpty()) {
            LabeledDataKnowledgeSource knowledgeSource = new LabeledDataKnowledgeSource()
                .setContainerUrl(sasUrl);
            knowledgeSources.add(knowledgeSource);
            // Truncate the SAS URL when logging so the full (credential-bearing) query string is not printed.
            System.out.println("Using labeled training data from: " + sasUrl.substring(0, Math.min(50, sasUrl.length())) + "...");
        } else {
            System.out.println("No TRAINING_DATA_SAS_URL set, creating analyzer without labeled training data");
        }

        // Step 3: Create analyzer (with or without labeled data)
        Map models = new HashMap<>();
        models.put("completion", "gpt-4.1");
        models.put("embedding", "text-embedding-3-large");

        ContentAnalyzer analyzer = new ContentAnalyzer()
            .setBaseAnalyzerId("prebuilt-document")
            .setDescription("Receipt analyzer with labeled training data")
            .setConfig(new ContentAnalyzerConfig()
                .setEnableLayout(true)
                .setEnableOcr(true))
            .setFieldSchema(fieldSchema)
            .setModels(models);

        if (!knowledgeSources.isEmpty()) {
            analyzer.setKnowledgeSources(knowledgeSources);
        }

        // For demonstration without actual training data, create analyzer without knowledge sources
        // Using reactive pattern for async operations
        PollerFlux createPoller
            = client.beginCreateAnalyzer(finalAnalyzerId, analyzer, true);

        // last() waits for the terminal poll response; flatMap turns it into the final result
        // or an error if the LRO did not complete successfully.
        createPoller.last()
            .flatMap(pollResponse -> {
                if (pollResponse.getStatus().isComplete()) {
                    System.out.println("Polling completed successfully");
                    return pollResponse.getFinalResult();
                } else {
                    return Mono.error(new RuntimeException(
                        "Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
                }
            })
            .doOnNext(result -> {
                System.out.println("Analyzer created: " + finalAnalyzerId);
                System.out.println(" Description: " + result.getDescription());
                System.out.println(" Base analyzer: " + result.getBaseAnalyzerId());
                System.out.println(" Fields: " + result.getFieldSchema().getFields().size());
                // END: com.azure.ai.contentunderstanding.createAnalyzerWithLabelsAsync

                // Verify analyzer creation
                System.out.println("\n📋 Analyzer Creation Verification:");
                System.out.println("Analyzer created successfully");

                // Verify field schema
                Map resultFields = result.getFieldSchema().getFields();
                System.out.println("Field schema verified:");
                System.out.println(" MerchantName: String (Extract)");
                System.out.println(" Items: Array of Objects (Generate)");
                System.out.println(" - Quantity, Name, Price");
                System.out.println(" Total: String (Extract)");

                ContentFieldDefinition itemsFieldResult = resultFields.get("Items");
                System.out.println("Items field verified:");
                System.out.println(" Type: " + itemsFieldResult.getType());
                System.out.println(" Item properties: " + itemsFieldResult.getItemDefinition().getProperties().size());

                // Display API pattern information
                System.out.println("\n📚 CreateAnalyzerWithLabels API Pattern:");
                System.out.println(" 1. Define field schema with nested structures (arrays, objects)");
                System.out.println(" 2. Upload training data to Azure Blob Storage:");
                System.out.println(" - Documents: receipt1.pdf, receipt2.pdf, ...");
                System.out.println(" - Labels: receipt1.pdf.labels.json, receipt2.pdf.labels.json, ...");
                System.out.println(" - OCR: receipt1.pdf.result.json, receipt2.pdf.result.json, ...");
                System.out.println(" 3. Create LabeledDataKnowledgeSource with storage SAS URL");
                System.out.println(" 4. Create analyzer with field schema and knowledge sources");
                System.out.println(" 5. Use analyzer for document analysis");

                System.out.println("\n✅ CreateAnalyzerWithLabels pattern demonstration completed");
                System.out.println(" Note: This sample demonstrates the API pattern.");
                System.out.println(" For actual training, provide TRAINING_DATA_SAS_URL with labeled data.");
            })
            .doFinally(signalType -> {
                // Cleanup using reactive pattern.
                // NOTE(review): this inner subscribe() is fire-and-forget; the System.exit(1) in the
                // error subscriber below could terminate the JVM before this delete completes — confirm.
                client.deleteAnalyzer(finalAnalyzerId)
                    .onErrorResume(e -> {
                        System.out.println("Note: Failed to delete analyzer: " + e.getMessage());
                        return Mono.empty();
                    })
                    .doOnSuccess(v -> System.out.println("\nAnalyzer deleted: " + finalAnalyzerId))
                    .subscribe();
            })
            .doOnError(error -> {
                System.err.println("Error: " + error.getMessage());
                error.printStackTrace();
            })
            .subscribe(
                result -> {
                    // Success - operations completed
                },
                error -> {
                    // Error already handled in doOnError
                    System.exit(1);
                }
            );

        // subscribe() is non-blocking: it only starts the reactive pipeline. For the purpose of this
        // example we sleep the main thread so the process does not exit before the async operations complete.
+ try { + TimeUnit.SECONDS.sleep(30); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/mixed_financial_docs.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/mixed_financial_docs.pdf new file mode 100644 index 000000000000..2c6d57818e11 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/mixed_financial_docs.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_document_features.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_document_features.pdf new file mode 100644 index 000000000000..9f47030c0377 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_document_features.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_invoice.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_invoice.pdf new file mode 100644 index 000000000000..812bcd9b30f3 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/samples/resources/sample_invoice.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/ContentUnderstandingClientTestBase.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/ContentUnderstandingClientTestBase.java new file mode 100644 index 000000000000..fa090cf441a3 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/ContentUnderstandingClientTestBase.java @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

// The Java test files under 'generated' package are generated for your reference.
// If you wish to modify these files, please copy them out of the 'generated' package, and modify there.
// See https://aka.ms/azsdk/dpg/java/tests for guide on adding a test.

import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClient;
import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.core.http.policy.HttpLogOptions;
import com.azure.core.test.TestMode;
import com.azure.core.test.TestProxyTestBase;
import com.azure.core.test.utils.MockTokenCredential;
import com.azure.core.util.Configuration;
import com.azure.identity.DefaultAzureCredentialBuilder;

/**
 * Shared base for Content Understanding sample tests. Builds a sync and an async client in
 * {@link #beforeTest()} using the endpoint from the {@code CONTENTUNDERSTANDING_ENDPOINT}
 * configuration value, wiring credentials and record/playback policies according to the
 * current {@link TestMode}, and removes test-proxy sanitizers that would break LRO polling.
 */
class ContentUnderstandingClientTestBase extends TestProxyTestBase {
    protected ContentUnderstandingClient contentUnderstandingClient;
    protected ContentUnderstandingAsyncClient contentUnderstandingAsyncClient;

    // Sanitizer IDs to remove:
    // - AZSDK2003, AZSDK2030: Replace Location/Operation-Location headers with "https://example.com"
    //   which breaks LRO polling that relies on Operation-Location header URLs
    // - AZSDK3423: Replaces $..source field with "Sanitized", breaking field source validation
    // - AZSDK3430: Replaces $..id field with "Sanitized"
    // - AZSDK3493: Replaces $..name field with "Sanitized", breaking fieldSchema.name validation
    private static final String[] REMOVE_SANITIZER_ID
        = { "AZSDK2003", "AZSDK2030", "AZSDK3423", "AZSDK3430", "AZSDK3493" };

    @Override
    protected void beforeTest() {
        String endpoint = Configuration.getGlobalConfiguration().get("CONTENTUNDERSTANDING_ENDPOINT");
        if (endpoint == null || endpoint.isEmpty()) {
            if (getTestMode() == TestMode.PLAYBACK) {
                // In PLAYBACK, requests go through the test proxy; endpoint is only used for URL construction.
                endpoint = "https://localhost";
            } else {
                throw new IllegalStateException(
                    "Content Understanding endpoint is required. Set CONTENTUNDERSTANDING_ENDPOINT as an environment variable "
                        + "or system property (e.g. export CONTENTUNDERSTANDING_ENDPOINT=... or mvn test -DCONTENTUNDERSTANDING_ENDPOINT=...).");
            }
        }
        // Strip trailing slash to prevent double-slash in URLs
        if (endpoint.endsWith("/")) {
            endpoint = endpoint.substring(0, endpoint.length() - 1);
        }

        // Renamed from "contentUnderstandingClientbuilder" to follow lowerCamelCase convention.
        ContentUnderstandingClientBuilder contentUnderstandingClientBuilder
            = new ContentUnderstandingClientBuilder().endpoint(endpoint)
                .httpClient(getHttpClientOrUsePlayback(getHttpClients().findFirst().orElse(null)))
                .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
        if (getTestMode() == TestMode.PLAYBACK) {
            // Playback never talks to the real service, so a mock credential suffices.
            contentUnderstandingClientBuilder.credential(new MockTokenCredential());
        } else if (getTestMode() == TestMode.RECORD) {
            // RECORD additionally captures traffic through the interceptor's record policy.
            contentUnderstandingClientBuilder.addPolicy(interceptorManager.getRecordPolicy())
                .credential(new DefaultAzureCredentialBuilder().build());
        } else if (getTestMode() == TestMode.LIVE) {
            contentUnderstandingClientBuilder.credential(new DefaultAzureCredentialBuilder().build());
        }
        contentUnderstandingClient = contentUnderstandingClientBuilder.buildClient();
        contentUnderstandingAsyncClient = contentUnderstandingClientBuilder.buildAsyncClient();

        // Remove sanitizers that break LRO polling by replacing entire URLs
        if (getTestMode() != TestMode.LIVE) {
            interceptorManager.removeSanitizers(REMOVE_SANITIZER_ID);
        }
    }
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaults.java
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaults.java
new file mode 100644
index 000000000000..9f427dcd9c3f
--- /dev/null
+++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaults.java
@@ -0,0 +1,120 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults;
import org.junit.jupiter.api.Test;

import java.util.HashMap;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Test class demonstrating how to configure and manage default settings for Content Understanding service.
 * This test shows:
 * 1. Getting current default configuration
 * 2. Updating default configuration with model deployments
 * 3. Verifying the updated configuration
 */
public class Sample00_UpdateDefaults extends ContentUnderstandingClientTestBase {

    @Test
    public void testUpdateDefaults() {
        // BEGIN:ContentUnderstandingGetDefaults
        // Step 1: Get current defaults to see what's configured
        System.out.println("Getting current default configuration...");
        ContentUnderstandingDefaults currentDefaults = contentUnderstandingClient.getDefaults();
        System.out.println("Current defaults retrieved successfully.");
        // NOTE(review): getModelDeployments() is dereferenced here before the assertNotNull below;
        // a null response would surface as an NPE at this line rather than an assertion failure.
        System.out.println("Current model deployments: " + currentDefaults.getModelDeployments());
        // END:ContentUnderstandingGetDefaults

        // BEGIN:Assertion_ContentUnderstandingGetDefaults
        assertNotNull(currentDefaults, "Current defaults should not be null");
        assertNotNull(currentDefaults.getModelDeployments(), "Model deployments should not be null");
        // END:Assertion_ContentUnderstandingGetDefaults

        // Step 2: Configure model deployments from environment variables
        // These map model names to your deployed model names in Azure AI Foundry
        System.out.println("\nConfiguring model deployments from environment variables...");

        // Get deployment names from environment variables (with defaults)
        String gpt41Deployment = getEnvOrDefault("GPT_4_1_DEPLOYMENT", "gpt-4.1");
        String gpt41MiniDeployment = getEnvOrDefault("GPT_4_1_MINI_DEPLOYMENT", "gpt-4.1-mini");
        String textEmbedding3LargeDeployment
            = getEnvOrDefault("TEXT_EMBEDDING_3_LARGE_DEPLOYMENT", "text-embedding-3-large");

        // Create model deployments map
        // NOTE(review): type arguments appear stripped in this copy (raw Map) — presumably
        // Map<String, String>; confirm against the generated source.
        Map modelDeployments = new HashMap<>();
        modelDeployments.put("gpt-4.1", gpt41Deployment);
        modelDeployments.put("gpt-4.1-mini", gpt41MiniDeployment);
        modelDeployments.put("text-embedding-3-large", textEmbedding3LargeDeployment);

        System.out.println("Model deployments to configure:");
        System.out.println(" gpt-4.1 -> " + gpt41Deployment);
        System.out.println(" gpt-4.1-mini -> " + gpt41MiniDeployment);
        System.out.println(" text-embedding-3-large -> " + textEmbedding3LargeDeployment);

        // BEGIN:ContentUnderstandingUpdateDefaults
        // Step 3: Update defaults with the new configuration
        System.out.println("\nUpdating default configuration...");

        // Update defaults with the configuration using the typed convenience method
        ContentUnderstandingDefaults updatedConfig = contentUnderstandingClient.updateDefaults(modelDeployments);
        System.out.println("Defaults updated successfully.");
        System.out.println("Updated model deployments: " + updatedConfig.getModelDeployments());
        // END:ContentUnderstandingUpdateDefaults

        // BEGIN:Assertion_ContentUnderstandingUpdateDefaults
        assertNotNull(updatedConfig, "Updated config should not be null");
        assertNotNull(updatedConfig.getModelDeployments(), "Updated model deployments should not be null");
        assertFalse(updatedConfig.getModelDeployments().isEmpty(), "Updated model deployments should not be empty");
        // END:Assertion_ContentUnderstandingUpdateDefaults

        // BEGIN:ContentUnderstandingVerifyDefaults
        // Step 4: Verify the updated configuration
        System.out.println("\nVerifying updated configuration...");
        ContentUnderstandingDefaults updatedDefaults = contentUnderstandingClient.getDefaults();
        System.out.println("Updated defaults verified successfully.");
        System.out.println("Updated model deployments: " + updatedDefaults.getModelDeployments());
        // END:ContentUnderstandingVerifyDefaults

        // BEGIN:Assertion_ContentUnderstandingVerifyDefaults
        assertNotNull(updatedDefaults, "Verified defaults should not be null");
        assertNotNull(updatedDefaults.getModelDeployments(), "Verified model deployments should not be null");
        assertFalse(updatedDefaults.getModelDeployments().isEmpty(), "Verified model deployments should not be empty");

        // Verify the model deployments contain the expected keys
        assertTrue(updatedDefaults.getModelDeployments().containsKey("gpt-4.1"),
            "Model deployments should contain gpt-4.1");
        assertTrue(updatedDefaults.getModelDeployments().containsKey("gpt-4.1-mini"),
            "Model deployments should contain gpt-4.1-mini");
        assertTrue(updatedDefaults.getModelDeployments().containsKey("text-embedding-3-large"),
            "Model deployments should contain text-embedding-3-large");

        // Verify the values match what we set
        assertEquals(gpt41Deployment, updatedDefaults.getModelDeployments().get("gpt-4.1"),
            "gpt-4.1 deployment should match configured value");
        assertEquals(gpt41MiniDeployment, updatedDefaults.getModelDeployments().get("gpt-4.1-mini"),
            "gpt-4.1-mini deployment should match configured value");
        assertEquals(textEmbedding3LargeDeployment, updatedDefaults.getModelDeployments().get("text-embedding-3-large"),
            "text-embedding-3-large deployment should match configured value");
        // END:Assertion_ContentUnderstandingVerifyDefaults

        System.out.println("\nConfiguration management completed.");
    }

    /**
     * Gets an environment variable value or returns a default value if not set.
     * Blank (whitespace-only) values are treated the same as unset.
     *
     * @param envVar the environment variable name
     * @param defaultValue the default value to return if the environment variable is not set
     * @return the environment variable value or the default value
     */
    private static String getEnvOrDefault(String envVar, String defaultValue) {
        String value = System.getenv(envVar);
        return (value != null && !value.trim().isEmpty()) ? value : defaultValue;
    }
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaultsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaultsAsync.java
new file mode 100644
index 000000000000..9e2862d90fde
--- /dev/null
+++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample00_UpdateDefaultsAsync.java
@@ -0,0 +1,121 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.ai.contentunderstanding.models.ContentUnderstandingDefaults;
import org.junit.jupiter.api.Test;

import java.util.HashMap;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Async test class demonstrating how to configure and manage default settings for Content Understanding service.
 * This test shows:
 * 1. Getting current default configuration asynchronously
 * 2. Updating default configuration with model deployments asynchronously
 * 3.
Verifying the updated configuration
 */
public class Sample00_UpdateDefaultsAsync extends ContentUnderstandingClientTestBase {

    @Test
    public void testUpdateDefaultsAsync() {
        // BEGIN:ContentUnderstandingGetDefaultsAsync
        // Step 1: Get current defaults to see what's configured
        System.out.println("Getting current default configuration...");
        // block() bridges the reactive call into this synchronous test flow.
        ContentUnderstandingDefaults currentDefaults = contentUnderstandingAsyncClient.getDefaults().block();
        System.out.println("Current defaults retrieved successfully.");
        // NOTE(review): getModelDeployments() is dereferenced here before the assertNotNull below;
        // a null block() result would surface as an NPE at this line rather than an assertion failure.
        System.out.println("Current model deployments: " + currentDefaults.getModelDeployments());
        // END:ContentUnderstandingGetDefaultsAsync

        // BEGIN:Assertion_ContentUnderstandingGetDefaultsAsync
        assertNotNull(currentDefaults, "Current defaults should not be null");
        assertNotNull(currentDefaults.getModelDeployments(), "Model deployments should not be null");
        // END:Assertion_ContentUnderstandingGetDefaultsAsync

        // Step 2: Configure model deployments from environment variables
        // These map model names to your deployed model names in Azure AI Foundry
        System.out.println("\nConfiguring model deployments from environment variables...");

        // Get deployment names from environment variables (with defaults)
        String gpt41Deployment = getEnvOrDefault("GPT_4_1_DEPLOYMENT", "gpt-4.1");
        String gpt41MiniDeployment = getEnvOrDefault("GPT_4_1_MINI_DEPLOYMENT", "gpt-4.1-mini");
        String textEmbedding3LargeDeployment
            = getEnvOrDefault("TEXT_EMBEDDING_3_LARGE_DEPLOYMENT", "text-embedding-3-large");

        // Create model deployments map
        // NOTE(review): type arguments appear stripped in this copy (raw Map) — presumably
        // Map<String, String>; confirm against the generated source.
        Map modelDeployments = new HashMap<>();
        modelDeployments.put("gpt-4.1", gpt41Deployment);
        modelDeployments.put("gpt-4.1-mini", gpt41MiniDeployment);
        modelDeployments.put("text-embedding-3-large", textEmbedding3LargeDeployment);

        System.out.println("Model deployments to configure:");
        System.out.println(" gpt-4.1 -> " + gpt41Deployment);
        System.out.println(" gpt-4.1-mini -> " + gpt41MiniDeployment);
        System.out.println(" text-embedding-3-large -> " + textEmbedding3LargeDeployment);

        // BEGIN:ContentUnderstandingUpdateDefaultsAsync
        // Step 3: Update defaults with the new configuration
        System.out.println("\nUpdating default configuration...");

        // Update defaults with the configuration using the typed convenience method
        ContentUnderstandingDefaults updatedConfig
            = contentUnderstandingAsyncClient.updateDefaults(modelDeployments).block();
        System.out.println("Defaults updated successfully.");
        System.out.println("Updated model deployments: " + updatedConfig.getModelDeployments());
        // END:ContentUnderstandingUpdateDefaultsAsync

        // BEGIN:Assertion_ContentUnderstandingUpdateDefaultsAsync
        assertNotNull(updatedConfig, "Updated config should not be null");
        assertNotNull(updatedConfig.getModelDeployments(), "Updated model deployments should not be null");
        assertFalse(updatedConfig.getModelDeployments().isEmpty(), "Updated model deployments should not be empty");
        // END:Assertion_ContentUnderstandingUpdateDefaultsAsync

        // BEGIN:ContentUnderstandingVerifyDefaultsAsync
        // Step 4: Verify the updated configuration
        System.out.println("\nVerifying updated configuration...");
        ContentUnderstandingDefaults updatedDefaults = contentUnderstandingAsyncClient.getDefaults().block();
        System.out.println("Updated defaults verified successfully.");
        System.out.println("Updated model deployments: " + updatedDefaults.getModelDeployments());
        // END:ContentUnderstandingVerifyDefaultsAsync

        // BEGIN:Assertion_ContentUnderstandingVerifyDefaultsAsync
        assertNotNull(updatedDefaults, "Verified defaults should not be null");
        assertNotNull(updatedDefaults.getModelDeployments(), "Verified model deployments should not be null");
        assertFalse(updatedDefaults.getModelDeployments().isEmpty(), "Verified model deployments should not be empty");

        // Verify the model deployments contain the expected keys
        assertTrue(updatedDefaults.getModelDeployments().containsKey("gpt-4.1"),
            "Model deployments should contain gpt-4.1");
        assertTrue(updatedDefaults.getModelDeployments().containsKey("gpt-4.1-mini"),
            "Model deployments should contain gpt-4.1-mini");
        assertTrue(updatedDefaults.getModelDeployments().containsKey("text-embedding-3-large"),
            "Model deployments should contain text-embedding-3-large");

        // Verify the values match what we set
        assertEquals(gpt41Deployment, updatedDefaults.getModelDeployments().get("gpt-4.1"),
            "gpt-4.1 deployment should match configured value");
        assertEquals(gpt41MiniDeployment, updatedDefaults.getModelDeployments().get("gpt-4.1-mini"),
            "gpt-4.1-mini deployment should match configured value");
        assertEquals(textEmbedding3LargeDeployment, updatedDefaults.getModelDeployments().get("text-embedding-3-large"),
            "text-embedding-3-large deployment should match configured value");
        // END:Assertion_ContentUnderstandingVerifyDefaultsAsync

        System.out.println("\nConfiguration management completed.");
    }

    /**
     * Gets an environment variable value or returns a default value if not set.
     * Blank (whitespace-only) values are treated the same as unset.
     * NOTE(review): duplicated verbatim from Sample00_UpdateDefaults — could live in the shared
     * test base; left in place to keep each sample self-contained.
     *
     * @param envVar the environment variable name
     * @param defaultValue the default value to return if the environment variable is not set
     * @return the environment variable value or the default value
     */
    private static String getEnvOrDefault(String envVar, String defaultValue) {
        String value = System.getenv(envVar);
        return (value != null && !value.trim().isEmpty()) ? value : defaultValue;
    }
}
diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinary.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinary.java
new file mode 100644
index 000000000000..46e9f1da1702
--- /dev/null
+++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinary.java
@@ -0,0 +1,257 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.DocumentContent;
import com.azure.ai.contentunderstanding.models.DocumentPage;
import com.azure.ai.contentunderstanding.models.DocumentTable;
import com.azure.ai.contentunderstanding.models.DocumentTableCell;
import com.azure.ai.contentunderstanding.models.MediaContent;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.SyncPoller;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

/**
 * Sample demonstrating how to analyze binary documents using Content Understanding service.
 * This sample shows:
 * 1. Loading a binary file (PDF)
 * 2. Analyzing the document
 * 3. Extracting markdown content
 * 4. Accessing document properties (pages, tables, etc.)
+ */ +public class Sample01_AnalyzeBinary extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeBinary() throws IOException { + + // Load the sample file + String filePath = "src/test/resources/sample_invoice.pdf"; + Path path = Paths.get(filePath); + + byte[] fileBytes; + BinaryData binaryData; + boolean hasRealFile = Files.exists(path); + + // Check if sample file exists + fileBytes = Files.readAllBytes(path); + binaryData = BinaryData.fromBytes(fileBytes); + + // BEGIN:ContentUnderstandingAnalyzeBinary + // Use the simplified beginAnalyzeBinary overload - contentType defaults to "application/octet-stream" + // For PDFs, you can also explicitly specify "application/pdf" using the full method signature + SyncPoller operation + = contentUnderstandingClient.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeBinary + + // BEGIN:Assertion_ContentUnderstandingAnalyzeBinary + if (hasRealFile) { + assertTrue(Files.exists(path), "Sample file not found at " + filePath); + } + assertTrue(fileBytes.length > 0, "File should not be empty"); + assertNotNull(binaryData, "Binary data should not be null"); + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("Analysis operation properties verified"); + + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? 
result.getContents().size() : 0) + " content(s)"); + // END:Assertion_ContentUnderstandingAnalyzeBinary + + // BEGIN:ContentUnderstandingExtractMarkdown + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + // END:ContentUnderstandingExtractMarkdown + + // BEGIN:Assertion_ContentUnderstandingExtractMarkdown + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + assertNotNull(content, "Content should not be null"); + assertTrue(content instanceof MediaContent, "Content should be of type MediaContent"); + + // Only validate markdown content if we have a real file + if (hasRealFile && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + assertFalse(content.getMarkdown().trim().isEmpty(), "Markdown content should not be just whitespace"); + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } else { + System.out + .println("⚠️ Skipping markdown content validation (using minimal test PDF or no markdown available)"); + } + // END:Assertion_ContentUnderstandingExtractMarkdown + + // BEGIN:ContentUnderstandingAccessDocumentProperties + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document 
type: " + + (documentContent.getMimeType() != null ? documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + // Content is not DocumentContent - verify it's MediaContent + assertTrue(content instanceof MediaContent, "Content should be MediaContent when not DocumentContent"); + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + // END:ContentUnderstandingAccessDocumentProperties + + // BEGIN:Assertion_ContentUnderstandingAccessDocumentProperties + assertNotNull(content, "Content should not be null for document properties validation"); + + if (content instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) content; + + // Validate MIME type + assertNotNull(docContent.getMimeType(), "MIME type should not be null"); + 
assertFalse(docContent.getMimeType().trim().isEmpty(), "MIME type should not be empty"); + assertEquals("application/pdf", docContent.getMimeType(), "MIME type should be application/pdf"); + System.out.println("MIME type verified: " + docContent.getMimeType()); + + // Validate page numbers + assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1; + assertTrue(totalPages > 0, "Total pages should be positive"); + System.out.println("Page range verified: " + docContent.getStartPageNumber() + " to " + + docContent.getEndPageNumber() + " (" + totalPages + " pages)"); + + // Validate pages collection + if (docContent.getPages() != null && !docContent.getPages().isEmpty()) { + assertTrue(docContent.getPages().size() > 0, "Pages collection should not be empty when not null"); + assertEquals(totalPages, docContent.getPages().size(), + "Pages collection count should match calculated total pages"); + System.out.println("Pages collection verified: " + docContent.getPages().size() + " pages"); + + // Track page numbers to ensure they're sequential and unique + Set pageNumbers = new HashSet<>(); + + for (DocumentPage page : docContent.getPages()) { + assertNotNull(page, "Page object should not be null"); + assertTrue(page.getPageNumber() >= 1, "Page number should be >= 1"); + assertTrue( + page.getPageNumber() >= docContent.getStartPageNumber() + && page.getPageNumber() <= docContent.getEndPageNumber(), + "Page number " + page.getPageNumber() + " should be within document range [" + + docContent.getStartPageNumber() + ", " + docContent.getEndPageNumber() + "]"); + assertTrue(page.getWidth() > 0, + "Page " + page.getPageNumber() + " width should be > 0, but was " + page.getWidth()); + assertTrue(page.getHeight() > 0, + "Page " + page.getPageNumber() + " 
height should be > 0, but was " + page.getHeight()); + + // Ensure page numbers are unique + assertTrue(pageNumbers.add(page.getPageNumber()), + "Page number " + page.getPageNumber() + " appears multiple times"); + + String unit = docContent.getUnit() != null ? docContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } else { + System.out.println("⚠️ No pages collection available in document content"); + } + + // Validate tables collection + if (docContent.getTables() != null && !docContent.getTables().isEmpty()) { + assertTrue(docContent.getTables().size() > 0, "Tables collection should not be empty when not null"); + System.out.println("Tables collection verified: " + docContent.getTables().size() + " tables"); + + int tableCounter = 1; + for (DocumentTable table : docContent.getTables()) { + assertNotNull(table, "Table " + tableCounter + " should not be null"); + assertTrue(table.getRowCount() > 0, + "Table " + tableCounter + " should have at least 1 row, but had " + table.getRowCount()); + assertTrue(table.getColumnCount() > 0, + "Table " + tableCounter + " should have at least 1 column, but had " + table.getColumnCount()); + + // Validate table cells if available + if (table.getCells() != null) { + assertTrue(table.getCells().size() > 0, + "Table " + tableCounter + " cells collection should not be empty when not null"); + + for (DocumentTableCell cell : table.getCells()) { + assertNotNull(cell, "Table cell should not be null"); + assertTrue(cell.getRowIndex() >= 0 && cell.getRowIndex() < table.getRowCount(), + "Cell row index " + cell.getRowIndex() + " should be within table row count " + + table.getRowCount()); + assertTrue(cell.getColumnIndex() >= 0 && cell.getColumnIndex() < table.getColumnCount(), + "Cell column index " + cell.getColumnIndex() + " should be within table column count " + + table.getColumnCount()); + assertTrue(cell.getRowSpan() 
>= 1, + "Cell row span should be >= 1, but was " + cell.getRowSpan()); + assertTrue(cell.getColumnSpan() >= 1, + "Cell column span should be >= 1, but was " + cell.getColumnSpan()); + } + } + + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns" + + (table.getCells() != null ? " (" + table.getCells().size() + " cells)" : "")); + tableCounter++; + } + } else { + System.out.println("No tables found in document content"); + } + + System.out.println("All document properties validated successfully"); + } else { + // Content is not DocumentContent - validate alternative types + assertTrue(content instanceof MediaContent, + "Content should be MediaContent when not DocumentContent, but got " + + (content != null ? content.getClass().getSimpleName() : "null")); + System.out.println("Content is not DocumentContent type, skipping document-specific validations"); + System.out.println("⚠️ Content type: " + content.getClass().getSimpleName() + " (MediaContent validated)"); + } + // END:Assertion_ContentUnderstandingAccessDocumentProperties + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinaryAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinaryAsync.java new file mode 100644 index 000000000000..4c987b605b21 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample01_AnalyzeBinaryAsync.java @@ -0,0 +1,266 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.DocumentTableCell; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashSet; +import java.util.Set; + +/** + * Async sample demonstrating how to analyze binary documents using Content Understanding service. + * This sample shows: + * 1. Loading a binary file (PDF) + * 2. Analyzing the document asynchronously + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ +public class Sample01_AnalyzeBinaryAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeBinaryAsync() throws IOException { + + // Load the sample file + String filePath = "src/test/resources/sample_invoice.pdf"; + Path path = Paths.get(filePath); + + byte[] fileBytes; + BinaryData binaryData; + boolean hasRealFile = Files.exists(path); + + // Check if sample file exists + fileBytes = Files.readAllBytes(path); + binaryData = BinaryData.fromBytes(fileBytes); + + // BEGIN:ContentUnderstandingAnalyzeBinaryAsync + // Use the simplified beginAnalyzeBinary overload - contentType defaults to "application/octet-stream" + // For PDFs, you can also explicitly specify "application/pdf" using the full method signature + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + // END:ContentUnderstandingAnalyzeBinaryAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeBinaryAsync + if (hasRealFile) { + assertTrue(Files.exists(path), "Sample file not found at " + filePath); + } + assertTrue(fileBytes.length > 0, "File should not be empty"); + assertNotNull(binaryData, "Binary data should not be null"); + assertNotNull(operation, "Analysis operation should not be null"); + System.out.println("Analysis operation properties verified"); + + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + 
System.out.println("Analysis result contains " + + (result.getContents() != null ? result.getContents().size() : 0) + " content(s)"); + // END:Assertion_ContentUnderstandingAnalyzeBinaryAsync + + // BEGIN:ContentUnderstandingExtractMarkdown + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + // END:ContentUnderstandingExtractMarkdown + + // BEGIN:Assertion_ContentUnderstandingExtractMarkdown + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + assertNotNull(content, "Content should not be null"); + assertTrue(content instanceof MediaContent, "Content should be of type MediaContent"); + + // Only validate markdown content if we have a real file + if (hasRealFile && content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + assertFalse(content.getMarkdown().trim().isEmpty(), "Markdown content should not be just whitespace"); + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } else { + System.out + .println("⚠️ Skipping markdown content validation (using minimal test PDF or no markdown available)"); + } + // END:Assertion_ContentUnderstandingExtractMarkdown + + // BEGIN:ContentUnderstandingAccessDocumentProperties + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + 
DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + // Content is not DocumentContent - verify it's MediaContent + assertTrue(content instanceof MediaContent, "Content should be MediaContent when not DocumentContent"); + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + // END:ContentUnderstandingAccessDocumentProperties + + // BEGIN:Assertion_ContentUnderstandingAccessDocumentProperties + assertNotNull(content, "Content should not be null for document properties validation"); + + if (content instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) content; + + // Validate MIME 
type + assertNotNull(docContent.getMimeType(), "MIME type should not be null"); + assertFalse(docContent.getMimeType().trim().isEmpty(), "MIME type should not be empty"); + assertEquals("application/pdf", docContent.getMimeType(), "MIME type should be application/pdf"); + System.out.println("MIME type verified: " + docContent.getMimeType()); + + // Validate page numbers + assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1; + assertTrue(totalPages > 0, "Total pages should be positive"); + System.out.println("Page range verified: " + docContent.getStartPageNumber() + " to " + + docContent.getEndPageNumber() + " (" + totalPages + " pages)"); + + // Validate pages collection + if (docContent.getPages() != null && !docContent.getPages().isEmpty()) { + assertTrue(docContent.getPages().size() > 0, "Pages collection should not be empty when not null"); + assertEquals(totalPages, docContent.getPages().size(), + "Pages collection count should match calculated total pages"); + System.out.println("Pages collection verified: " + docContent.getPages().size() + " pages"); + + // Track page numbers to ensure they're sequential and unique + Set pageNumbers = new HashSet<>(); + + for (DocumentPage page : docContent.getPages()) { + assertNotNull(page, "Page object should not be null"); + assertTrue(page.getPageNumber() >= 1, "Page number should be >= 1"); + assertTrue( + page.getPageNumber() >= docContent.getStartPageNumber() + && page.getPageNumber() <= docContent.getEndPageNumber(), + "Page number " + page.getPageNumber() + " should be within document range [" + + docContent.getStartPageNumber() + ", " + docContent.getEndPageNumber() + "]"); + assertTrue(page.getWidth() > 0, + "Page " + page.getPageNumber() + " width should be > 0, but was " + 
page.getWidth()); + assertTrue(page.getHeight() > 0, + "Page " + page.getPageNumber() + " height should be > 0, but was " + page.getHeight()); + + // Ensure page numbers are unique + assertTrue(pageNumbers.add(page.getPageNumber()), + "Page number " + page.getPageNumber() + " appears multiple times"); + + String unit = docContent.getUnit() != null ? docContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } else { + System.out.println("⚠️ No pages collection available in document content"); + } + + // Validate tables collection + if (docContent.getTables() != null && !docContent.getTables().isEmpty()) { + assertTrue(docContent.getTables().size() > 0, "Tables collection should not be empty when not null"); + System.out.println("Tables collection verified: " + docContent.getTables().size() + " tables"); + + int tableCounter = 1; + for (DocumentTable table : docContent.getTables()) { + assertNotNull(table, "Table " + tableCounter + " should not be null"); + assertTrue(table.getRowCount() > 0, + "Table " + tableCounter + " should have at least 1 row, but had " + table.getRowCount()); + assertTrue(table.getColumnCount() > 0, + "Table " + tableCounter + " should have at least 1 column, but had " + table.getColumnCount()); + + // Validate table cells if available + if (table.getCells() != null) { + assertTrue(table.getCells().size() > 0, + "Table " + tableCounter + " cells collection should not be empty when not null"); + + for (DocumentTableCell cell : table.getCells()) { + assertNotNull(cell, "Table cell should not be null"); + assertTrue(cell.getRowIndex() >= 0 && cell.getRowIndex() < table.getRowCount(), + "Cell row index " + cell.getRowIndex() + " should be within table row count " + + table.getRowCount()); + assertTrue(cell.getColumnIndex() >= 0 && cell.getColumnIndex() < table.getColumnCount(), + "Cell column index " + cell.getColumnIndex() + " should 
be within table column count " + + table.getColumnCount()); + assertTrue(cell.getRowSpan() >= 1, + "Cell row span should be >= 1, but was " + cell.getRowSpan()); + assertTrue(cell.getColumnSpan() >= 1, + "Cell column span should be >= 1, but was " + cell.getColumnSpan()); + } + } + + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns" + + (table.getCells() != null ? " (" + table.getCells().size() + " cells)" : "")); + tableCounter++; + } + } else { + System.out.println("No tables found in document content"); + } + + System.out.println("All document properties validated successfully"); + } else { + // Content is not DocumentContent - validate alternative types + assertTrue(content instanceof MediaContent, + "Content should be MediaContent when not DocumentContent, but got " + + (content != null ? content.getClass().getSimpleName() : "null")); + System.out.println("Content is not DocumentContent type, skipping document-specific validations"); + System.out.println("⚠️ Content type: " + content.getClass().getSimpleName() + " (MediaContent validated)"); + } + // END:Assertion_ContentUnderstandingAccessDocumentProperties + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrl.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrl.java new file mode 100644 index 000000000000..08b057aa77f6 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrl.java @@ -0,0 +1,405 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.AudioVisualContent; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.DocumentTableCell; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.TranscriptPhrase; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Sample demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a document + * 2. Analyzing the document + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ +public class Sample02_AnalyzeUrl extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeUrl() { + + // BEGIN:ContentUnderstandingAnalyzeUrl + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = contentUnderstandingClient.beginAnalyze("prebuilt-documentSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeUrl + + // BEGIN:Assertion_ContentUnderstandingAnalyzeUrl + assertNotNull(uriSource, "URI source should not be null"); + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("Analysis operation properties verified"); + + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? 
result.getContents().size() : 0) + " content(s)"); + // END:Assertion_ContentUnderstandingAnalyzeUrl + + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + assertNotNull(content, "Content should not be null"); + assertTrue(content instanceof MediaContent, "Content should be of type MediaContent"); + + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + assertFalse(content.getMarkdown().trim().isEmpty(), "Markdown content should not be just whitespace"); + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } + + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? 
documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + // Content is not DocumentContent - verify it's MediaContent + assertTrue(content instanceof MediaContent, "Content should be MediaContent when not DocumentContent"); + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + + assertNotNull(content, "Content should not be null for document properties validation"); + + if (content instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) content; + + // Validate MIME type + assertNotNull(docContent.getMimeType(), "MIME type should not be null"); + assertFalse(docContent.getMimeType().trim().isEmpty(), "MIME type should not be empty"); + assertEquals("application/pdf", docContent.getMimeType(), "MIME type should be 
application/pdf"); + System.out.println("MIME type verified: " + docContent.getMimeType()); + + // Validate page numbers + assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1; + assertTrue(totalPages > 0, "Total pages should be positive"); + System.out.println("Page range verified: " + docContent.getStartPageNumber() + " to " + + docContent.getEndPageNumber() + " (" + totalPages + " pages)"); + + // Validate pages collection + if (docContent.getPages() != null && !docContent.getPages().isEmpty()) { + assertTrue(docContent.getPages().size() > 0, "Pages collection should not be empty when not null"); + assertEquals(totalPages, docContent.getPages().size(), + "Pages collection count should match calculated total pages"); + System.out.println("Pages collection verified: " + docContent.getPages().size() + " pages"); + + // Track page numbers to ensure they're sequential and unique + Set pageNumbers = new HashSet<>(); + + for (DocumentPage page : docContent.getPages()) { + assertNotNull(page, "Page object should not be null"); + assertTrue(page.getPageNumber() >= 1, "Page number should be >= 1"); + assertTrue( + page.getPageNumber() >= docContent.getStartPageNumber() + && page.getPageNumber() <= docContent.getEndPageNumber(), + "Page number " + page.getPageNumber() + " should be within document range [" + + docContent.getStartPageNumber() + ", " + docContent.getEndPageNumber() + "]"); + assertTrue(page.getWidth() > 0, + "Page " + page.getPageNumber() + " width should be > 0, but was " + page.getWidth()); + assertTrue(page.getHeight() > 0, + "Page " + page.getPageNumber() + " height should be > 0, but was " + page.getHeight()); + + // Ensure page numbers are unique + assertTrue(pageNumbers.add(page.getPageNumber()), + "Page number " + 
page.getPageNumber() + " appears multiple times"); + + String unit = docContent.getUnit() != null ? docContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } else { + System.out.println("⚠️ No pages collection available in document content"); + } + + // Validate tables collection + if (docContent.getTables() != null && !docContent.getTables().isEmpty()) { + assertTrue(docContent.getTables().size() > 0, "Tables collection should not be empty when not null"); + System.out.println("Tables collection verified: " + docContent.getTables().size() + " tables"); + + int tableCounter = 1; + for (DocumentTable table : docContent.getTables()) { + assertNotNull(table, "Table " + tableCounter + " should not be null"); + assertTrue(table.getRowCount() > 0, + "Table " + tableCounter + " should have at least 1 row, but had " + table.getRowCount()); + assertTrue(table.getColumnCount() > 0, + "Table " + tableCounter + " should have at least 1 column, but had " + table.getColumnCount()); + + // Validate table cells if available + if (table.getCells() != null) { + assertTrue(table.getCells().size() > 0, + "Table " + tableCounter + " cells collection should not be empty when not null"); + + for (DocumentTableCell cell : table.getCells()) { + assertNotNull(cell, "Table cell should not be null"); + assertTrue(cell.getRowIndex() >= 0 && cell.getRowIndex() < table.getRowCount(), + "Cell row index " + cell.getRowIndex() + " should be within table row count " + + table.getRowCount()); + assertTrue(cell.getColumnIndex() >= 0 && cell.getColumnIndex() < table.getColumnCount(), + "Cell column index " + cell.getColumnIndex() + " should be within table column count " + + table.getColumnCount()); + } + } + + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns" + + (table.getCells() != null ? 
" (" + table.getCells().size() + " cells)" : "")); + tableCounter++; + } + } else { + System.out.println("⚠️ No tables found in document content"); + } + + System.out.println("All document properties validated successfully"); + } else { + // Content is not DocumentContent - validate alternative types + assertTrue(content instanceof MediaContent, + "Content should be MediaContent when not DocumentContent, but got " + + (content != null ? content.getClass().getSimpleName() : "null")); + System.out.println("⚠️ Content is not DocumentContent type, skipping document-specific validations"); + System.out.println("⚠️ Content type: " + content.getClass().getSimpleName() + " (MediaContent validated)"); + } + } + + @Test + public void testAnalyzeVideoUrl() { + // BEGIN:ContentUnderstandingAnalyzeVideoUrl + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/videos/sdk_samples/FlightSimulator.mp4"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = contentUnderstandingClient.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + // prebuilt-videoSearch can detect video segments, so we should iterate through all segments + int segmentIndex = 1; + for (MediaContent media : result.getContents()) { + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent videoContent = (AudioVisualContent) media; + System.out.println("--- Segment " + segmentIndex + " ---"); + System.out.println("Markdown:"); + System.out.println(videoContent.getMarkdown()); + + String summary = videoContent.getFields() != null && videoContent.getFields().containsKey("Summary") + ? 
(videoContent.getFields().get("Summary").getValue() != null + ? videoContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + System.out.println( + "Start: " + videoContent.getStartTimeMs() + " ms, End: " + videoContent.getEndTimeMs() + " ms"); + System.out.println("Frame size: " + videoContent.getWidth() + " x " + videoContent.getHeight()); + + System.out.println("---------------------"); + segmentIndex++; + } + // END:ContentUnderstandingAnalyzeVideoUrl + + // BEGIN:Assertion_ContentUnderstandingAnalyzeVideoUrl + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + + // Verify all contents are AudioVisualContent + for (MediaContent content : result.getContents()) { + assertTrue(content instanceof AudioVisualContent, "Video analysis should return audio/visual content."); + AudioVisualContent avContent = (AudioVisualContent) content; + assertNotNull(avContent.getFields(), "AudioVisualContent should have fields"); + assertTrue(avContent.getFields().containsKey("Summary"), "Video segment should have Summary field"); + assertNotNull(avContent.getFields().get("Summary").getValue(), "Summary value should not be null"); + String summaryStr = avContent.getFields().get("Summary").getValue().toString(); + assertFalse(summaryStr.trim().isEmpty(), "Summary should not be empty"); + } + System.out.println("Video analysis validation completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeVideoUrl + } + + @Test + public void testAnalyzeAudioUrl() { + // BEGIN:ContentUnderstandingAnalyzeAudioUrl + String uriSource + = 
"https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/audio/callCenterRecording.mp3"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = contentUnderstandingClient.beginAnalyze("prebuilt-audioSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent audioContent = (AudioVisualContent) result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(audioContent.getMarkdown()); + + String summary = audioContent.getFields() != null && audioContent.getFields().containsKey("Summary") + ? (audioContent.getFields().get("Summary").getValue() != null + ? audioContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + // Example: Access an additional field in AudioVisualContent (transcript phrases) + List transcriptPhrases = audioContent.getTranscriptPhrases(); + if (transcriptPhrases != null && !transcriptPhrases.isEmpty()) { + System.out.println("Transcript (first two phrases):"); + int count = 0; + for (TranscriptPhrase phrase : transcriptPhrases) { + if (count >= 2) { + break; + } + System.out + .println(" [" + phrase.getSpeaker() + "] " + phrase.getStartTimeMs() + " ms: " + phrase.getText()); + count++; + } + } + // END:ContentUnderstandingAnalyzeAudioUrl + + // BEGIN:Assertion_ContentUnderstandingAnalyzeAudioUrl + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + assertNotNull(result, "Analysis result should not be null"); + 
assertNotNull(result.getContents(), "Result contents should not be null"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + + // Verify content is AudioVisualContent + assertTrue(audioContent instanceof AudioVisualContent, "Audio analysis should return audio/visual content."); + + // Verify all contents have Summary field + for (MediaContent content : result.getContents()) { + assertTrue(content instanceof AudioVisualContent, "Audio analysis should return audio/visual content."); + AudioVisualContent avContent = (AudioVisualContent) content; + assertNotNull(avContent.getFields(), "AudioVisualContent should have fields"); + assertTrue(avContent.getFields().containsKey("Summary"), "Audio content should have Summary field"); + assertNotNull(avContent.getFields().get("Summary").getValue(), "Summary value should not be null"); + String summaryStr = avContent.getFields().get("Summary").getValue().toString(); + assertFalse(summaryStr.trim().isEmpty(), "Summary should not be empty"); + } + System.out.println("Audio analysis validation completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeAudioUrl + } + + @Test + public void testAnalyzeImageUrl() { + // BEGIN:ContentUnderstandingAnalyzeImageUrl + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/image/pieChart.jpg"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + SyncPoller operation + = contentUnderstandingClient.beginAnalyze("prebuilt-imageSearch", Arrays.asList(input)); + + AnalyzeResult result = operation.getFinalResult(); + + MediaContent content = result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(content.getMarkdown()); + + String summary = content.getFields() != null && content.getFields().containsKey("Summary") + ? (content.getFields().get("Summary").getValue() != null + ? 
content.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + // END:ContentUnderstandingAnalyzeImageUrl + + // BEGIN:Assertion_ContentUnderstandingAnalyzeImageUrl + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + + // Verify content has Summary field + for (MediaContent mediaContent : result.getContents()) { + assertNotNull(mediaContent.getFields(), "Content should have fields"); + assertTrue(mediaContent.getFields().containsKey("Summary"), "Image content should have Summary field"); + assertNotNull(mediaContent.getFields().get("Summary").getValue(), "Summary value should not be null"); + String summaryStr = mediaContent.getFields().get("Summary").getValue().toString(); + assertFalse(summaryStr.trim().isEmpty(), "Summary should not be empty"); + } + System.out.println("Image analysis validation completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeImageUrl + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrlAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrlAsync.java new file mode 100644 index 000000000000..cb779e36234c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample02_AnalyzeUrlAsync.java @@ -0,0 +1,437 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.AudioVisualContent; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentPage; +import com.azure.ai.contentunderstanding.models.DocumentTable; +import com.azure.ai.contentunderstanding.models.DocumentTableCell; +import com.azure.ai.contentunderstanding.models.MediaContent; +import com.azure.ai.contentunderstanding.models.TranscriptPhrase; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Async sample demonstrating how to analyze documents from URL using Content Understanding service. + * This sample shows: + * 1. Providing a URL to a document + * 2. Analyzing the document asynchronously + * 3. Extracting markdown content + * 4. Accessing document properties (pages, tables, etc.) 
+ */ +public class Sample02_AnalyzeUrlAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeUrlAsync() { + + // BEGIN:ContentUnderstandingAnalyzeUrlAsyncAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-documentSearch", Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap, doOnNext, doOnError + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + // END:ContentUnderstandingAnalyzeUrlAsyncAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeUrlAsyncAsync + assertNotNull(uriSource, "URI source should not be null"); + assertNotNull(operation, "Analysis operation should not be null"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + System.out.println("Analysis operation properties verified"); + System.out.println("Analysis result contains " + + (result.getContents() != null ? 
result.getContents().size() : 0) + " content(s)"); + // END:Assertion_ContentUnderstandingAnalyzeUrlAsyncAsync + + // A PDF file has only one content element even if it contains multiple pages + MediaContent content = null; + if (result.getContents() == null || result.getContents().isEmpty()) { + System.out.println("(No content returned from analysis)"); + } else { + content = result.getContents().get(0); + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + System.out.println(content.getMarkdown()); + } else { + System.out.println("(No markdown content available)"); + } + } + + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + assertNotNull(content, "Content should not be null"); + assertTrue(content instanceof MediaContent, "Content should be of type MediaContent"); + + if (content.getMarkdown() != null && !content.getMarkdown().isEmpty()) { + assertFalse(content.getMarkdown().trim().isEmpty(), "Markdown content should not be just whitespace"); + System.out + .println("Markdown content extracted successfully (" + content.getMarkdown().length() + " characters)"); + } + + // Check if this is document content to access document-specific properties + if (content instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) content; + System.out.println("Document type: " + + (documentContent.getMimeType() != null ? 
documentContent.getMimeType() : "(unknown)")); + System.out.println("Start page: " + documentContent.getStartPageNumber()); + System.out.println("End page: " + documentContent.getEndPageNumber()); + System.out.println( + "Total pages: " + (documentContent.getEndPageNumber() - documentContent.getStartPageNumber() + 1)); + + // Check for pages + if (documentContent.getPages() != null && !documentContent.getPages().isEmpty()) { + System.out.println("Number of pages: " + documentContent.getPages().size()); + for (DocumentPage page : documentContent.getPages()) { + String unit = documentContent.getUnit() != null ? documentContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } + + // Check for tables + if (documentContent.getTables() != null && !documentContent.getTables().isEmpty()) { + System.out.println("Number of tables: " + documentContent.getTables().size()); + int tableCounter = 1; + for (DocumentTable table : documentContent.getTables()) { + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns"); + tableCounter++; + } + } + } else { + // Content is not DocumentContent - verify it's MediaContent + assertTrue(content instanceof MediaContent, "Content should be MediaContent when not DocumentContent"); + System.out.println("Content is MediaContent (not document-specific), skipping document properties"); + } + + assertNotNull(content, "Content should not be null for document properties validation"); + + if (content instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) content; + + // Validate MIME type + assertNotNull(docContent.getMimeType(), "MIME type should not be null"); + assertFalse(docContent.getMimeType().trim().isEmpty(), "MIME type should not be empty"); + assertEquals("application/pdf", docContent.getMimeType(), "MIME type should be 
application/pdf"); + System.out.println("MIME type verified: " + docContent.getMimeType()); + + // Validate page numbers + assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1; + assertTrue(totalPages > 0, "Total pages should be positive"); + System.out.println("Page range verified: " + docContent.getStartPageNumber() + " to " + + docContent.getEndPageNumber() + " (" + totalPages + " pages)"); + + // Validate pages collection + if (docContent.getPages() != null && !docContent.getPages().isEmpty()) { + assertTrue(docContent.getPages().size() > 0, "Pages collection should not be empty when not null"); + assertEquals(totalPages, docContent.getPages().size(), + "Pages collection count should match calculated total pages"); + System.out.println("Pages collection verified: " + docContent.getPages().size() + " pages"); + + // Track page numbers to ensure they're sequential and unique + Set pageNumbers = new HashSet<>(); + + for (DocumentPage page : docContent.getPages()) { + assertNotNull(page, "Page object should not be null"); + assertTrue(page.getPageNumber() >= 1, "Page number should be >= 1"); + assertTrue( + page.getPageNumber() >= docContent.getStartPageNumber() + && page.getPageNumber() <= docContent.getEndPageNumber(), + "Page number " + page.getPageNumber() + " should be within document range [" + + docContent.getStartPageNumber() + ", " + docContent.getEndPageNumber() + "]"); + assertTrue(page.getWidth() > 0, + "Page " + page.getPageNumber() + " width should be > 0, but was " + page.getWidth()); + assertTrue(page.getHeight() > 0, + "Page " + page.getPageNumber() + " height should be > 0, but was " + page.getHeight()); + + // Ensure page numbers are unique + assertTrue(pageNumbers.add(page.getPageNumber()), + "Page number " + 
page.getPageNumber() + " appears multiple times"); + + String unit = docContent.getUnit() != null ? docContent.getUnit().toString() : "units"; + System.out.println(" Page " + page.getPageNumber() + ": " + page.getWidth() + " x " + + page.getHeight() + " " + unit); + } + } else { + System.out.println("⚠️ No pages collection available in document content"); + } + + // Validate tables collection + if (docContent.getTables() != null && !docContent.getTables().isEmpty()) { + assertTrue(docContent.getTables().size() > 0, "Tables collection should not be empty when not null"); + System.out.println("Tables collection verified: " + docContent.getTables().size() + " tables"); + + int tableCounter = 1; + for (DocumentTable table : docContent.getTables()) { + assertNotNull(table, "Table " + tableCounter + " should not be null"); + assertTrue(table.getRowCount() > 0, + "Table " + tableCounter + " should have at least 1 row, but had " + table.getRowCount()); + assertTrue(table.getColumnCount() > 0, + "Table " + tableCounter + " should have at least 1 column, but had " + table.getColumnCount()); + + // Validate table cells if available + if (table.getCells() != null) { + assertTrue(table.getCells().size() > 0, + "Table " + tableCounter + " cells collection should not be empty when not null"); + + for (DocumentTableCell cell : table.getCells()) { + assertNotNull(cell, "Table cell should not be null"); + assertTrue(cell.getRowIndex() >= 0 && cell.getRowIndex() < table.getRowCount(), + "Cell row index " + cell.getRowIndex() + " should be within table row count " + + table.getRowCount()); + assertTrue(cell.getColumnIndex() >= 0 && cell.getColumnIndex() < table.getColumnCount(), + "Cell column index " + cell.getColumnIndex() + " should be within table column count " + + table.getColumnCount()); + } + } + + System.out.println(" Table " + tableCounter + ": " + table.getRowCount() + " rows x " + + table.getColumnCount() + " columns" + + (table.getCells() != null ? 
" (" + table.getCells().size() + " cells)" : "")); + tableCounter++; + } + } else { + System.out.println("⚠️ No tables found in document content"); + } + + System.out.println("All document properties validated successfully"); + } else { + // Content is not DocumentContent - validate alternative types + assertTrue(content instanceof MediaContent, + "Content should be MediaContent when not DocumentContent, but got " + + (content != null ? content.getClass().getSimpleName() : "null")); + System.out.println("⚠️ Content is not DocumentContent type, skipping document-specific validations"); + System.out.println("⚠️ Content type: " + content.getClass().getSimpleName() + " (MediaContent validated)"); + } + } + + @Test + public void testAnalyzeVideoUrlAsync() { + // BEGIN:ContentUnderstandingAnalyzeVideoUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/videos/sdk_samples/FlightSimulator.mp4"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + // prebuilt-videoSearch can detect video segments, so we should iterate through all segments + int segmentIndex = 1; + for (MediaContent media : result.getContents()) { + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and 
provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent videoContent = (AudioVisualContent) media; + System.out.println("--- Segment " + segmentIndex + " ---"); + System.out.println("Markdown:"); + System.out.println(videoContent.getMarkdown()); + + String summary = videoContent.getFields() != null && videoContent.getFields().containsKey("Summary") + ? (videoContent.getFields().get("Summary").getValue() != null + ? videoContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + System.out.println( + "Start: " + videoContent.getStartTimeMs() + " ms, End: " + videoContent.getEndTimeMs() + " ms"); + System.out.println("Frame size: " + videoContent.getWidth() + " x " + videoContent.getHeight()); + + System.out.println("---------------------"); + segmentIndex++; + } + // END:ContentUnderstandingAnalyzeVideoUrlAsyncAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeVideoUrlAsyncAsync + assertNotNull(operation, "Analysis operation should not be null"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + + // Verify all contents are AudioVisualContent + for (MediaContent content : result.getContents()) { + assertTrue(content instanceof AudioVisualContent, "Video analysis should return audio/visual content."); + AudioVisualContent avContent = (AudioVisualContent) content; + assertNotNull(avContent.getFields(), "AudioVisualContent should have fields"); + assertTrue(avContent.getFields().containsKey("Summary"), "Video segment should have Summary field"); + assertNotNull(avContent.getFields().get("Summary").getValue(), "Summary value should not be null"); + String summaryStr = 
avContent.getFields().get("Summary").getValue().toString(); + assertFalse(summaryStr.trim().isEmpty(), "Summary should not be empty"); + } + System.out.println("Video analysis validation completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeVideoUrlAsyncAsync + } + + @Test + public void testAnalyzeAudioUrlAsync() { + // BEGIN:ContentUnderstandingAnalyzeAudioUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/audio/callCenterRecording.mp3"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-audioSearch", Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + // Cast MediaContent to AudioVisualContent to access audio/visual-specific properties + // AudioVisualContent derives from MediaContent and provides additional properties + // to access full information about audio/video, including timing, transcript phrases, and many others + AudioVisualContent audioContent = (AudioVisualContent) result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(audioContent.getMarkdown()); + + String summary = audioContent.getFields() != null && audioContent.getFields().containsKey("Summary") + ? (audioContent.getFields().get("Summary").getValue() != null + ? 
audioContent.getFields().get("Summary").getValue().toString() + : "") + : ""; + System.out.println("Summary: " + summary); + + // Example: Access an additional field in AudioVisualContent (transcript phrases) + List transcriptPhrases = audioContent.getTranscriptPhrases(); + if (transcriptPhrases != null && !transcriptPhrases.isEmpty()) { + System.out.println("Transcript (first two phrases):"); + int count = 0; + for (TranscriptPhrase phrase : transcriptPhrases) { + if (count >= 2) { + break; + } + System.out + .println(" [" + phrase.getSpeaker() + "] " + phrase.getStartTimeMs() + " ms: " + phrase.getText()); + count++; + } + } + // END:ContentUnderstandingAnalyzeAudioUrlAsyncAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeAudioUrlAsyncAsync + assertNotNull(operation, "Analysis operation should not be null"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result contents should not be null"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + + // Verify content is AudioVisualContent + assertTrue(audioContent instanceof AudioVisualContent, "Audio analysis should return audio/visual content."); + + // Verify all contents have Summary field + for (MediaContent content : result.getContents()) { + assertTrue(content instanceof AudioVisualContent, "Audio analysis should return audio/visual content."); + AudioVisualContent avContent = (AudioVisualContent) content; + assertNotNull(avContent.getFields(), "AudioVisualContent should have fields"); + assertTrue(avContent.getFields().containsKey("Summary"), "Audio content should have Summary field"); + assertNotNull(avContent.getFields().get("Summary").getValue(), "Summary value should not be null"); + String summaryStr = avContent.getFields().get("Summary").getValue().toString(); + assertFalse(summaryStr.trim().isEmpty(), "Summary should not be empty"); + } + System.out.println("Audio analysis validation completed 
successfully"); + // END:Assertion_ContentUnderstandingAnalyzeAudioUrlAsyncAsync + } + + @Test + public void testAnalyzeImageUrlAsync() { + // BEGIN:ContentUnderstandingAnalyzeImageUrlAsyncAsync + String uriSource + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-assets/main/image/pieChart.jpg"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(uriSource); + + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-imageSearch", Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + MediaContent content = result.getContents().get(0); + System.out.println("Markdown:"); + System.out.println(content.getMarkdown()); + + String summary = content.getFields() != null && content.getFields().containsKey("Summary") + ? (content.getFields().get("Summary").getValue() != null + ? 
} // closes the async URL-sample test class whose final members end on this span

// --- new file in the patch: Sample03_AnalyzeInvoice.java ---

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.ArrayField;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.DocumentContent;
import com.azure.ai.contentunderstanding.models.ContentField;
import com.azure.ai.contentunderstanding.models.ContentSpan;
import com.azure.ai.contentunderstanding.models.MediaContent;
import com.azure.ai.contentunderstanding.models.ObjectField;
import com.azure.core.util.polling.SyncPoller;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

import java.util.Arrays;
import java.util.List;

/**
 * Sample demonstrating how to analyze invoices using Content Understanding service.
 * This sample shows:
 * 1. Analyzing an invoice document
 * 2. Extracting structured invoice fields
 * 3. Accessing nested object fields (TotalAmount)
 * 4. Accessing array fields (LineItems)
 * 5. Working with field confidence and source information
 */
public class Sample03_AnalyzeInvoice extends ContentUnderstandingClientTestBase {

    @Test
    public void testAnalyzeInvoice() {

        // BEGIN:ContentUnderstandingAnalyzeInvoice
        // Using a publicly accessible sample file from Azure-Samples GitHub repository
        String invoiceUrl
            = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf";

        AnalyzeInput input = new AnalyzeInput();
        input.setUrl(invoiceUrl);

        // NOTE(review): the raw SyncPoller type was missing its generic arguments (it would
        // not compile cleanly); ContentAnalyzerAnalyzeOperationStatus is imported above and
        // otherwise unused, which corroborates it as the poll type - confirm against the
        // generated beginAnalyze signature.
        SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> operation
            = contentUnderstandingClient.beginAnalyze("prebuilt-invoice", Arrays.asList(input));

        AnalyzeResult result = operation.getFinalResult();
        // END:ContentUnderstandingAnalyzeInvoice

        // BEGIN:Assertion_ContentUnderstandingAnalyzeInvoice
        assertNotNull(invoiceUrl, "Invoice URL should not be null");
        assertNotNull(operation, "Analysis operation should not be null");
        assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed");
        System.out.println("Analysis operation properties verified");

        assertNotNull(result, "Analysis result should not be null");
        assertNotNull(result.getContents(), "Result should contain contents");
        assertTrue(result.getContents().size() > 0, "Result should have at least one content");
        assertEquals(1, result.getContents().size(), "Invoice should have exactly one content element");
        System.out.println("Analysis result contains " + result.getContents().size() + " content(s)");
        // END:Assertion_ContentUnderstandingAnalyzeInvoice

        // BEGIN:ContentUnderstandingExtractInvoiceFields
        // Get the document content (invoices are documents)
        MediaContent firstContent = result.getContents().get(0);
        if (firstContent instanceof DocumentContent) {
            DocumentContent documentContent = (DocumentContent) firstContent;

            // Print document unit information
            // The unit indicates the measurement system used for coordinates in the source field
            System.out.println("Document unit: "
                + (documentContent.getUnit() != null ? documentContent.getUnit().toString() : "unknown"));
            System.out.println(
                "Pages: " + documentContent.getStartPageNumber() + " to " + documentContent.getEndPageNumber());
            System.out.println();

            // Extract simple string fields using getValue() convenience method
            // getValue() returns the typed value regardless of field type (StringField, NumberField, DateField, etc.)
            ContentField customerNameField
                = documentContent.getFields() != null ? documentContent.getFields().get("CustomerName") : null;
            ContentField invoiceDateField
                = documentContent.getFields() != null ? documentContent.getFields().get("InvoiceDate") : null;

            // Use getValue() instead of casting to specific types
            // Note: getValue() returns the actual typed value - String, Number, LocalDate, etc.
            String customerName = customerNameField != null ? (String) customerNameField.getValue() : null;
            // InvoiceDate is a DateField, so getValue() returns LocalDate - convert to String for display
            Object invoiceDateValue = invoiceDateField != null ? invoiceDateField.getValue() : null;
            String invoiceDate = invoiceDateValue != null ? invoiceDateValue.toString() : null;

            System.out.println("Customer Name: " + (customerName != null ? customerName : "(None)"));
            if (customerNameField != null) {
                System.out.println(" Confidence: " + (customerNameField.getConfidence() != null
                    ? String.format("%.2f", customerNameField.getConfidence())
                    : "N/A"));
                System.out.println(
                    " Source: " + (customerNameField.getSource() != null ? customerNameField.getSource() : "N/A"));
                List<ContentSpan> spans = customerNameField.getSpans();
                if (spans != null && !spans.isEmpty()) {
                    ContentSpan span = spans.get(0);
                    System.out
                        .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength());
                }
            }

            System.out.println("Invoice Date: " + (invoiceDate != null ? invoiceDate : "(None)"));
            if (invoiceDateField != null) {
                System.out.println(" Confidence: " + (invoiceDateField.getConfidence() != null
                    ? String.format("%.2f", invoiceDateField.getConfidence())
                    : "N/A"));
                System.out.println(
                    " Source: " + (invoiceDateField.getSource() != null ? invoiceDateField.getSource() : "N/A"));
                List<ContentSpan> spans = invoiceDateField.getSpans();
                if (spans != null && !spans.isEmpty()) {
                    ContentSpan span = spans.get(0);
                    System.out
                        .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength());
                }
            }

            // Extract object fields (nested structures) using getFieldOrDefault() convenience method
            // getFieldOrDefault() returns null if the field doesn't exist (safe access pattern)
            ContentField totalAmountField
                = documentContent.getFields() != null ? documentContent.getFields().get("TotalAmount") : null;
            if (totalAmountField instanceof ObjectField) {
                ObjectField totalAmountObj = (ObjectField) totalAmountField;

                // Use getFieldOrDefault() for safe nested field access
                ContentField amountField = totalAmountObj.getFieldOrDefault("Amount");
                ContentField currencyField = totalAmountObj.getFieldOrDefault("CurrencyCode");

                // Use getValue() instead of type-specific getters
                Double amount = amountField != null ? (Double) amountField.getValue() : null;
                String currency = currencyField != null ? (String) currencyField.getValue() : null;

                System.out.println("Total: " + (currency != null ? currency : "$")
                    + (amount != null ? String.format("%.2f", amount) : "(None)"));
                if (totalAmountObj.getConfidence() != null) {
                    System.out.println(" Confidence: " + String.format("%.2f", totalAmountObj.getConfidence()));
                }
                if (totalAmountObj.getSource() != null && !totalAmountObj.getSource().isEmpty()) {
                    System.out.println(" Source: " + totalAmountObj.getSource());
                }
            }

            // Extract array fields (collections like line items) using size() and get() convenience methods
            ContentField lineItemsField
                = documentContent.getFields() != null ? documentContent.getFields().get("LineItems") : null;
            if (lineItemsField instanceof ArrayField) {
                ArrayField lineItems = (ArrayField) lineItemsField;
                // Use size() convenience method instead of getValueArray().size()
                System.out.println("Line Items (" + lineItems.size() + "):");
                for (int i = 0; i < lineItems.size(); i++) {
                    // Use get(index) convenience method instead of getValueArray().get(i)
                    ContentField itemField = lineItems.get(i);
                    if (itemField instanceof ObjectField) {
                        ObjectField item = (ObjectField) itemField;
                        // Use getFieldOrDefault() for safe nested access
                        ContentField descField = item.getFieldOrDefault("Description");
                        ContentField qtyField = item.getFieldOrDefault("Quantity");

                        // Use getValue() instead of type-specific getters
                        String description = descField != null ? (String) descField.getValue() : null;
                        Double quantity = qtyField != null ? (Double) qtyField.getValue() : null;

                        System.out.println(" Item " + (i + 1) + ": " + (description != null ? description : "N/A")
                            + " (Qty: " + (quantity != null ? String.valueOf(quantity) : "N/A") + ")");
                        if (item.getConfidence() != null) {
                            System.out.println(" Confidence: " + String.format("%.2f", item.getConfidence()));
                        }
                    }
                }
            }
        }
        // END:ContentUnderstandingExtractInvoiceFields

        // BEGIN:Assertion_ContentUnderstandingExtractInvoiceFields
        MediaContent content = result.getContents().get(0);
        assertNotNull(content, "Content should not be null");
        assertTrue(content instanceof DocumentContent, "Content should be of type DocumentContent");

        if (content instanceof DocumentContent) {
            DocumentContent docContent = (DocumentContent) content;

            // Verify basic document properties
            assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1");
            assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(),
                "End page should be >= start page");
            int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1;
            assertTrue(totalPages > 0, "Total pages should be positive");
            System.out.println("Document has " + totalPages + " page(s) from " + docContent.getStartPageNumber()
                + " to " + docContent.getEndPageNumber());

            System.out.println("All invoice fields validated successfully");
        } else {
            // This should not happen given the assertTrue above, but handle it for completeness
            fail("Content type validation failed: expected DocumentContent but got "
                + (content != null ? content.getClass().getSimpleName() : "null"));
        }
        // END:Assertion_ContentUnderstandingExtractInvoiceFields
    }
}
Accessing array fields (LineItems) + * 5. Working with field confidence and source information + */ +public class Sample03_AnalyzeInvoiceAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeInvoiceAsync() { + + // BEGIN:ContentUnderstandingAnalyzeInvoiceAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String invoiceUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(invoiceUrl); + + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + // END:ContentUnderstandingAnalyzeInvoiceAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeInvoiceAsync + assertNotNull(invoiceUrl, "Invoice URL should not be null"); + assertNotNull(operation, "Analysis operation should not be null"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "Invoice should have exactly one content element"); + System.out.println("Analysis operation properties verified"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + // 
END:Assertion_ContentUnderstandingAnalyzeInvoiceAsync + + // BEGIN:ContentUnderstandingExtractInvoiceFieldsAsync + // Get the document content (invoices are documents) + MediaContent firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) firstContent; + + // Print document unit information + // The unit indicates the measurement system used for coordinates in the source field + System.out.println("Document unit: " + + (documentContent.getUnit() != null ? documentContent.getUnit().toString() : "unknown")); + System.out.println( + "Pages: " + documentContent.getStartPageNumber() + " to " + documentContent.getEndPageNumber()); + System.out.println(); + + // Extract simple string fields using getValue() convenience method + // getValue() returns the typed value regardless of field type (StringField, NumberField, DateField, etc.) + ContentField customerNameField + = documentContent.getFields() != null ? documentContent.getFields().get("CustomerName") : null; + ContentField invoiceDateField + = documentContent.getFields() != null ? documentContent.getFields().get("InvoiceDate") : null; + + // Use getValue() instead of casting to specific types + // Note: getValue() returns the actual typed value - String, Number, LocalDate, etc. + String customerName = customerNameField != null ? (String) customerNameField.getValue() : null; + // InvoiceDate is a DateField, so getValue() returns LocalDate - convert to String for display + Object invoiceDateValue = invoiceDateField != null ? invoiceDateField.getValue() : null; + String invoiceDate = invoiceDateValue != null ? invoiceDateValue.toString() : null; + + System.out.println("Customer Name: " + (customerName != null ? customerName : "(None)")); + if (customerNameField != null) { + System.out.println(" Confidence: " + (customerNameField.getConfidence() != null + ? 
String.format("%.2f", customerNameField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (customerNameField.getSource() != null ? customerNameField.getSource() : "N/A")); + List spans = customerNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out + .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + System.out.println("Invoice Date: " + (invoiceDate != null ? invoiceDate : "(None)")); + if (invoiceDateField != null) { + System.out.println(" Confidence: " + (invoiceDateField.getConfidence() != null + ? String.format("%.2f", invoiceDateField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (invoiceDateField.getSource() != null ? invoiceDateField.getSource() : "N/A")); + List spans = invoiceDateField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out + .println(" Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Extract object fields (nested structures) using getFieldOrDefault() convenience method + // getFieldOrDefault() returns null if the field doesn't exist (safe access pattern) + ContentField totalAmountField + = documentContent.getFields() != null ? documentContent.getFields().get("TotalAmount") : null; + if (totalAmountField instanceof ObjectField) { + ObjectField totalAmountObj = (ObjectField) totalAmountField; + + // Use getFieldOrDefault() for safe nested field access + ContentField amountField = totalAmountObj.getFieldOrDefault("Amount"); + ContentField currencyField = totalAmountObj.getFieldOrDefault("CurrencyCode"); + + // Use getValue() instead of type-specific getters + Double amount = amountField != null ? (Double) amountField.getValue() : null; + String currency = currencyField != null ? (String) currencyField.getValue() : null; + + System.out.println("Total: " + (currency != null ? 
currency : "$") + + (amount != null ? String.format("%.2f", amount) : "(None)")); + if (totalAmountObj.getConfidence() != null) { + System.out.println(" Confidence: " + String.format("%.2f", totalAmountObj.getConfidence())); + } + if (totalAmountObj.getSource() != null && !totalAmountObj.getSource().isEmpty()) { + System.out.println(" Source: " + totalAmountObj.getSource()); + } + } + + // Extract array fields (collections like line items) using size() and get() convenience methods + ContentField lineItemsField + = documentContent.getFields() != null ? documentContent.getFields().get("LineItems") : null; + if (lineItemsField instanceof ArrayField) { + ArrayField lineItems = (ArrayField) lineItemsField; + // Use size() convenience method instead of getValueArray().size() + System.out.println("Line Items (" + lineItems.size() + "):"); + for (int i = 0; i < lineItems.size(); i++) { + // Use get(index) convenience method instead of getValueArray().get(i) + ContentField itemField = lineItems.get(i); + if (itemField instanceof ObjectField) { + ObjectField item = (ObjectField) itemField; + // Use getFieldOrDefault() for safe nested access + ContentField descField = item.getFieldOrDefault("Description"); + ContentField qtyField = item.getFieldOrDefault("Quantity"); + + // Use getValue() instead of type-specific getters + String description = descField != null ? (String) descField.getValue() : null; + Double quantity = qtyField != null ? (Double) qtyField.getValue() : null; + + System.out.println(" Item " + (i + 1) + ": " + (description != null ? description : "N/A") + + " (Qty: " + (quantity != null ? 
String.valueOf(quantity) : "N/A") + ")"); + if (item.getConfidence() != null) { + System.out.println(" Confidence: " + String.format("%.2f", item.getConfidence())); + } + } + } + } + } + // END:ContentUnderstandingExtractInvoiceFieldsAsync + + // BEGIN:Assertion_ContentUnderstandingExtractInvoiceFieldsAsync + MediaContent content = result.getContents().get(0); + assertNotNull(content, "Content should not be null"); + assertTrue(content instanceof DocumentContent, "Content should be of type DocumentContent"); + + if (content instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) content; + + // Verify basic document properties + assertTrue(docContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(docContent.getEndPageNumber() >= docContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = docContent.getEndPageNumber() - docContent.getStartPageNumber() + 1; + assertTrue(totalPages > 0, "Total pages should be positive"); + System.out.println("Document has " + totalPages + " page(s) from " + docContent.getStartPageNumber() + + " to " + docContent.getEndPageNumber()); + + System.out.println("All invoice fields validated successfully"); + } else { + // This should not happen given the assertTrue above, but handle it for completeness + fail("Content type validation failed: expected DocumentContent but got " + + (content != null ? 
content.getClass().getSimpleName() : "null")); + } + // END:Assertion_ContentUnderstandingExtractInvoiceFieldsAsync + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzer.java new file mode 100644 index 000000000000..12ed195ed65c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzer.java @@ -0,0 +1,474 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.NumberField; +import com.azure.ai.contentunderstanding.models.StringField; +import com.azure.core.util.polling.SyncPoller; +import 
org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +/** + * Sample demonstrating how to create a custom analyzer with field schema. + * This sample shows: + * 1. Defining a field schema with custom fields + * 2. Demonstrating three extraction methods: Extract, Generate, Classify + * 3. Creating a custom analyzer with configuration + * 4. Using the custom analyzer to analyze documents + */ +public class Sample04_CreateAnalyzer extends ContentUnderstandingClientTestBase { + + private String createdAnalyzerId; + + @AfterEach + public void cleanup() { + if (createdAnalyzerId != null) { + try { + contentUnderstandingClient.deleteAnalyzer(createdAnalyzerId); + System.out.println("Analyzer '" + createdAnalyzerId + "' deleted successfully."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testCreateAnalyzer() { + + // BEGIN:ContentUnderstandingCreateAnalyzer + // Generate a unique analyzer ID + String analyzerId = testResourceNamer.randomName("my_custom_analyzer_", 50); + + // Define field schema with custom fields + // This example demonstrates three extraction methods: + // - extract: Literal text extraction (requires estimateSourceAndConfidence) + // - generate: AI-generated values based on content interpretation + // - classify: Classification against predefined categories + Map<String, ContentFieldDefinition> fields = new HashMap<>(); + + ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + 
totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + // Create the custom analyzer with configuration + Map<String, String> models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom analyzer for extracting company information") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true) + .setEnableLayout(true) + .setEnableFormula(true) + .setEstimateFieldSourceAndConfidence(true) + .setReturnDetails(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + // Create the analyzer + SyncPoller<ContentAnalyzerOperationStatus, ContentAnalyzer> operation + = contentUnderstandingClient.beginCreateAnalyzer(analyzerId, customAnalyzer, true); + + ContentAnalyzer result = operation.getFinalResult(); + System.out.println("Analyzer '" + analyzerId + "' created successfully!"); + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + 
System.out.println(" Description: " + result.getDescription()); + } + + if (result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) { + System.out.println(" Fields (" + result.getFieldSchema().getFields().size() + "):"); + result.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + String method = fieldDef.getMethod() != null ? fieldDef.getMethod().toString() : "auto"; + String type = fieldDef.getType() != null ? fieldDef.getType().toString() : "unknown"; + System.out.println(" - " + fieldName + ": " + type + " (" + method + ")"); + }); + } + // END:ContentUnderstandingCreateAnalyzer + + createdAnalyzerId = analyzerId; // Track for cleanup + + // BEGIN:Assertion_ContentUnderstandingCreateAnalyzer + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(fieldSchema, "Field schema should not be null"); + assertNotNull(customAnalyzer, "Custom analyzer should not be null"); + assertNotNull(operation, "Create analyzer operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("Create analyzer operation properties verified"); + + assertNotNull(result, "Analyzer result should not be null"); + System.out.println("Analyzer '" + analyzerId + "' created successfully"); + + // Verify base analyzer + assertNotNull(result.getBaseAnalyzerId(), "Base analyzer ID should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId(), "Base analyzer ID should match"); + System.out.println("Base analyzer ID verified: " + result.getBaseAnalyzerId()); + + // Verify analyzer config + assertNotNull(result.getConfig(), "Analyzer config should not be null"); + assertTrue(result.getConfig().isEnableFormula(), "EnableFormula should be true"); + assertTrue(result.getConfig().isEnableLayout(), "EnableLayout should be true"); + 
assertTrue(result.getConfig().isEnableOcr(), "EnableOcr should be true"); + assertTrue(result.getConfig().isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should be true"); + assertTrue(result.getConfig().isReturnDetails(), "ReturnDetails should be true"); + System.out.println("Analyzer config verified"); + + // Verify field schema + assertNotNull(result.getFieldSchema(), "Field schema should not be null"); + assertFalse(result.getFieldSchema().getName().trim().isEmpty(), "Field schema name should not be empty"); + assertEquals("company_schema", result.getFieldSchema().getName(), "Field schema name should match"); + assertFalse(result.getFieldSchema().getDescription().trim().isEmpty(), + "Field schema description should not be empty"); + System.out.println("Field schema verified: " + result.getFieldSchema().getName()); + + // Verify field schema fields + assertNotNull(result.getFieldSchema().getFields(), "Field schema fields should not be null"); + assertEquals(4, result.getFieldSchema().getFields().size(), "Should have 4 custom fields"); + System.out.println("Field schema contains " + result.getFieldSchema().getFields().size() + " fields"); + + // Verify company_name field + assertTrue(result.getFieldSchema().getFields().containsKey("company_name"), + "Should contain company_name field"); + ContentFieldDefinition companyNameDefResult = result.getFieldSchema().getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, companyNameDefResult.getType(), "company_name should be String type"); + assertEquals(GenerationMethod.EXTRACT, companyNameDefResult.getMethod(), + "company_name should use Extract method"); + assertFalse(companyNameDefResult.getDescription().trim().isEmpty(), "company_name should have description"); + System.out.println(" company_name field verified (String, Extract)"); + + // Verify total_amount field + assertTrue(result.getFieldSchema().getFields().containsKey("total_amount"), + "Should contain total_amount 
field"); + ContentFieldDefinition totalAmountDefResult = result.getFieldSchema().getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, totalAmountDefResult.getType(), "total_amount should be Number type"); + assertEquals(GenerationMethod.EXTRACT, totalAmountDefResult.getMethod(), + "total_amount should use Extract method"); + assertFalse(totalAmountDefResult.getDescription().trim().isEmpty(), "total_amount should have description"); + System.out.println(" total_amount field verified (Number, Extract)"); + + // Verify document_summary field + assertTrue(result.getFieldSchema().getFields().containsKey("document_summary"), + "Should contain document_summary field"); + ContentFieldDefinition summaryDefResult = result.getFieldSchema().getFields().get("document_summary"); + assertEquals(ContentFieldType.STRING, summaryDefResult.getType(), "document_summary should be String type"); + assertEquals(GenerationMethod.GENERATE, summaryDefResult.getMethod(), + "document_summary should use Generate method"); + assertFalse(summaryDefResult.getDescription().trim().isEmpty(), "document_summary should have description"); + System.out.println(" document_summary field verified (String, Generate)"); + + // Verify document_type field + assertTrue(result.getFieldSchema().getFields().containsKey("document_type"), + "Should contain document_type field"); + ContentFieldDefinition documentTypeDefResult = result.getFieldSchema().getFields().get("document_type"); + assertEquals(ContentFieldType.STRING, documentTypeDefResult.getType(), "document_type should be String type"); + assertEquals(GenerationMethod.CLASSIFY, documentTypeDefResult.getMethod(), + "document_type should use Classify method"); + assertFalse(documentTypeDefResult.getDescription().trim().isEmpty(), "document_type should have description"); + assertNotNull(documentTypeDefResult.getEnumProperty(), "document_type should have enum values"); + assertEquals(5, documentTypeDefResult.getEnumProperty().size(), 
"document_type should have 5 enum values"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("invoice"), + "document_type enum should contain 'invoice'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("receipt"), + "document_type enum should contain 'receipt'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("contract"), + "document_type enum should contain 'contract'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("report"), + "document_type enum should contain 'report'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("other"), + "document_type enum should contain 'other'"); + System.out.println(" document_type field verified (String, Classify, 5 enum values)"); + + // Verify models + assertNotNull(result.getModels(), "Models should not be null"); + assertTrue(result.getModels().size() >= 2, "Should have at least 2 model mappings"); + assertTrue(result.getModels().containsKey("completion"), "Should contain 'completion' model mapping"); + assertTrue(result.getModels().containsKey("embedding"), "Should contain 'embedding' model mapping"); + assertEquals("gpt-4.1", result.getModels().get("completion"), "Completion model should be 'gpt-4.1'"); + assertEquals("text-embedding-3-large", result.getModels().get("embedding"), + "Embedding model should be 'text-embedding-3-large'"); + System.out.println("Model mappings verified: " + result.getModels().size() + " model(s)"); + + // Verify description + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + System.out.println("Analyzer description: " + result.getDescription()); + } + + System.out.println("All analyzer creation properties validated successfully"); + // END:Assertion_ContentUnderstandingCreateAnalyzer + } + + @Test + public void testUseCustomAnalyzer() { + // First create an analyzer + String analyzerId = testResourceNamer.randomName("test_analyzer_", 50); + + Map fields = new HashMap<>(); + + 
ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableFormula(true); + config.setEnableLayout(true); + config.setEnableOcr(true); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer(); + customAnalyzer.setBaseAnalyzerId("prebuilt-document"); + customAnalyzer.setDescription("Custom analyzer for extracting company information"); + customAnalyzer.setConfig(config); + customAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", 
"text-embedding-3-large"); + customAnalyzer.setModels(models); + + contentUnderstandingClient.beginCreateAnalyzer(analyzerId, customAnalyzer).getFinalResult(); + createdAnalyzerId = analyzerId; // Track for cleanup + + try { + // BEGIN:ContentUnderstandingUseCustomAnalyzer + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String documentUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + // Analyze a document using the custom analyzer + SyncPoller analyzeOperation + = contentUnderstandingClient.beginAnalyze(analyzerId, Arrays.asList(input)); + + AnalyzeResult analyzeResult = analyzeOperation.getFinalResult(); + + // Extract custom fields from the result + // Since EstimateFieldSourceAndConfidence is enabled, we can access confidence scores and source information + if (analyzeResult.getContents() != null + && !analyzeResult.getContents().isEmpty() + && analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) analyzeResult.getContents().get(0); + + // Extract field (literal text extraction) + ContentField companyNameField + = content.getFields() != null ? content.getFields().get("company_name") : null; + if (companyNameField instanceof StringField) { + StringField sf = (StringField) companyNameField; + String companyName = sf.getValueString(); + System.out + .println("Company Name (extract): " + (companyName != null ? companyName : "(not found)")); + System.out.println(" Confidence: " + (companyNameField.getConfidence() != null + ? String.format("%.2f", companyNameField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (companyNameField.getSource() != null ? 
companyNameField.getSource() : "N/A")); + List spans = companyNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Extract field (literal text extraction) + ContentField totalAmountField + = content.getFields() != null ? content.getFields().get("total_amount") : null; + if (totalAmountField instanceof NumberField) { + NumberField nf = (NumberField) totalAmountField; + Double totalAmount = nf.getValueNumber(); + System.out.println("Total Amount (extract): " + + (totalAmount != null ? String.format("%.2f", totalAmount) : "(not found)")); + System.out.println(" Confidence: " + (totalAmountField.getConfidence() != null + ? String.format("%.2f", totalAmountField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (totalAmountField.getSource() != null ? totalAmountField.getSource() : "N/A")); + List spans = totalAmountField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Generate field (AI-generated value) + ContentField summaryField + = content.getFields() != null ? content.getFields().get("document_summary") : null; + if (summaryField instanceof StringField) { + StringField sf = (StringField) summaryField; + String summary = sf.getValueString(); + System.out.println("Document Summary (generate): " + (summary != null ? summary : "(not found)")); + System.out.println(" Confidence: " + (summaryField.getConfidence() != null + ? 
String.format("%.2f", summaryField.getConfidence()) + : "N/A")); + // Note: Generated fields may not have source information + if (summaryField.getSource() != null && !summaryField.getSource().isEmpty()) { + System.out.println(" Source: " + summaryField.getSource()); + } + } + + // Classify field (classification against predefined categories) + ContentField documentTypeField + = content.getFields() != null ? content.getFields().get("document_type") : null; + if (documentTypeField instanceof StringField) { + StringField sf = (StringField) documentTypeField; + String documentType = sf.getValueString(); + System.out + .println("Document Type (classify): " + (documentType != null ? documentType : "(not found)")); + System.out.println(" Confidence: " + (documentTypeField.getConfidence() != null + ? String.format("%.2f", documentTypeField.getConfidence()) + : "N/A")); + // Note: Classified fields may not have source information + if (documentTypeField.getSource() != null && !documentTypeField.getSource().isEmpty()) { + System.out.println(" Source: " + documentTypeField.getSource()); + } + } + } + // END:ContentUnderstandingUseCustomAnalyzer + + // BEGIN:Assertion_ContentUnderstandingUseCustomAnalyzer + assertNotNull(documentUrl, "Document URL should not be null"); + assertNotNull(analyzeOperation, "Analyze operation should not be null"); + assertTrue(analyzeOperation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("Analyze operation properties verified"); + + assertNotNull(analyzeResult, "Analyze result should not be null"); + assertNotNull(analyzeResult.getContents(), "Result should contain contents"); + assertTrue(analyzeResult.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, analyzeResult.getContents().size(), "Result should have exactly one content element"); + System.out.println("Analysis result contains " + analyzeResult.getContents().size() + " content(s)"); + + 
DocumentContent documentContent = analyzeResult.getContents().get(0) instanceof DocumentContent + ? (DocumentContent) analyzeResult.getContents().get(0) + : null; + assertNotNull(documentContent, "Content should be DocumentContent"); + assertNotNull(documentContent.getFields(), "Document content should have fields"); + System.out.println("Document content has custom fields"); + + // Verify company_name field (Extract method) + ContentField companyNameFieldAssert + = documentContent.getFields() != null ? documentContent.getFields().get("company_name") : null; + if (companyNameFieldAssert != null) { + System.out.println("company_name field found"); + assertTrue(companyNameFieldAssert instanceof StringField, "company_name should be a StringField"); + + if (companyNameFieldAssert instanceof StringField) { + StringField cnf = (StringField) companyNameFieldAssert; + if (cnf.getValueString() != null && !cnf.getValueString().trim().isEmpty()) { + System.out.println(" Value: " + cnf.getValueString()); + } + } + + if (companyNameFieldAssert.getConfidence() != null) { + assertTrue( + companyNameFieldAssert.getConfidence() >= 0 && companyNameFieldAssert.getConfidence() <= 1, + "company_name confidence should be between 0 and 1, but was " + + companyNameFieldAssert.getConfidence()); + System.out + .println(" Confidence: " + String.format("%.2f", companyNameFieldAssert.getConfidence())); + } + + if (companyNameFieldAssert.getSource() != null + && !companyNameFieldAssert.getSource().trim().isEmpty()) { + assertTrue(companyNameFieldAssert.getSource().startsWith("D("), + "Source should start with 'D(' for extracted fields"); + System.out.println(" Source: " + companyNameFieldAssert.getSource()); + } + + List spans = companyNameFieldAssert.getSpans(); + if (spans != null && !spans.isEmpty()) { + assertTrue(spans.size() > 0, "Spans should not be empty when not null"); + for (ContentSpan span : spans) { + assertTrue(span.getOffset() >= 0, "Span offset should be >= 0, but was " + 
span.getOffset()); + assertTrue(span.getLength() > 0, "Span length should be > 0, but was " + span.getLength()); + } + System.out.println(" Spans: " + spans.size() + " span(s)"); + } + } else { + System.out.println("⚠️ company_name field not found"); + } + + System.out.println("All custom analyzer usage properties validated successfully"); + // END:Assertion_ContentUnderstandingUseCustomAnalyzer + } finally { + // Cleanup is handled by @AfterEach + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzerAsync.java new file mode 100644 index 000000000000..36a31f7bd426 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample04_CreateAnalyzerAsync.java @@ -0,0 +1,498 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentSpan; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.NumberField; +import com.azure.ai.contentunderstanding.models.StringField; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Async sample demonstrating how to create a custom analyzer with field schema. + * This sample shows: + * 1. Defining a field schema with custom fields + * 2. Demonstrating three extraction methods: Extract, Generate, Classify + * 3. Creating a custom analyzer with configuration asynchronously + * 4. 
Using the custom analyzer to analyze documents + */ +public class Sample04_CreateAnalyzerAsync extends ContentUnderstandingClientTestBase { + + private String createdAnalyzerId; + + @AfterEach + public void cleanup() { + if (createdAnalyzerId != null) { + try { + contentUnderstandingAsyncClient.deleteAnalyzer(createdAnalyzerId).block(); + System.out.println("Analyzer '" + createdAnalyzerId + "' deleted successfully."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testCreateAnalyzerAsync() { + + // BEGIN:ContentUnderstandingCreateAnalyzerAsync + // Generate a unique analyzer ID + String analyzerId = testResourceNamer.randomName("my_custom_analyzer_", 50); + + // Define field schema with custom fields + // This example demonstrates three extraction methods: + // - extract: Literal text extraction (requires estimateSourceAndConfidence) + // - generate: AI-generated values based on content interpretation + // - classify: Classification against predefined categories + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + 
documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + // Create the custom analyzer with configuration + Map<String, String> models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom analyzer for extracting company information") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true) + .setEnableLayout(true) + .setEnableFormula(true) + .setEstimateFieldSourceAndConfidence(true) + .setReturnDetails(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + // Create the analyzer + PollerFlux<ContentAnalyzerOperationStatus, ContentAnalyzer> operation + = contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, customAnalyzer, true); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + ContentAnalyzer result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analyzer '" + analyzerId + "' created successfully!"); + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + result.getDescription()); + } + + if 
(result.getFieldSchema() != null && result.getFieldSchema().getFields() != null) { + System.out.println(" Fields (" + result.getFieldSchema().getFields().size() + "):"); + result.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + String method = fieldDef.getMethod() != null ? fieldDef.getMethod().toString() : "auto"; + String type = fieldDef.getType() != null ? fieldDef.getType().toString() : "unknown"; + System.out.println(" - " + fieldName + ": " + type + " (" + method + ")"); + }); + } + // END:ContentUnderstandingCreateAnalyzerAsync + + createdAnalyzerId = analyzerId; // Track for cleanup + + // BEGIN:Assertion_ContentUnderstandingCreateAnalyzerAsync + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(fieldSchema, "Field schema should not be null"); + assertNotNull(customAnalyzer, "Custom analyzer should not be null"); + assertNotNull(operation, "Create analyzer operation should not be null"); + assertNotNull(result, "Analyzer result should not be null"); + System.out.println("Create analyzer operation properties verified"); + System.out.println("Analyzer '" + analyzerId + "' created successfully"); + + // Verify base analyzer + assertNotNull(result.getBaseAnalyzerId(), "Base analyzer ID should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId(), "Base analyzer ID should match"); + System.out.println("Base analyzer ID verified: " + result.getBaseAnalyzerId()); + + // Verify analyzer config + assertNotNull(result.getConfig(), "Analyzer config should not be null"); + assertTrue(result.getConfig().isEnableFormula(), "EnableFormula should be true"); + assertTrue(result.getConfig().isEnableLayout(), "EnableLayout should be true"); + assertTrue(result.getConfig().isEnableOcr(), "EnableOcr should be true"); + assertTrue(result.getConfig().isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should be 
true"); + assertTrue(result.getConfig().isReturnDetails(), "ReturnDetails should be true"); + System.out.println("Analyzer config verified"); + + // Verify field schema + assertNotNull(result.getFieldSchema(), "Field schema should not be null"); + assertFalse(result.getFieldSchema().getName().trim().isEmpty(), "Field schema name should not be empty"); + assertEquals("company_schema", result.getFieldSchema().getName(), "Field schema name should match"); + assertFalse(result.getFieldSchema().getDescription().trim().isEmpty(), + "Field schema description should not be empty"); + System.out.println("Field schema verified: " + result.getFieldSchema().getName()); + + // Verify field schema fields + assertNotNull(result.getFieldSchema().getFields(), "Field schema fields should not be null"); + assertEquals(4, result.getFieldSchema().getFields().size(), "Should have 4 custom fields"); + System.out.println("Field schema contains " + result.getFieldSchema().getFields().size() + " fields"); + + // Verify company_name field + assertTrue(result.getFieldSchema().getFields().containsKey("company_name"), + "Should contain company_name field"); + ContentFieldDefinition companyNameDefResult = result.getFieldSchema().getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, companyNameDefResult.getType(), "company_name should be String type"); + assertEquals(GenerationMethod.EXTRACT, companyNameDefResult.getMethod(), + "company_name should use Extract method"); + assertFalse(companyNameDefResult.getDescription().trim().isEmpty(), "company_name should have description"); + System.out.println(" company_name field verified (String, Extract)"); + + // Verify total_amount field + assertTrue(result.getFieldSchema().getFields().containsKey("total_amount"), + "Should contain total_amount field"); + ContentFieldDefinition totalAmountDefResult = result.getFieldSchema().getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, totalAmountDefResult.getType(), 
"total_amount should be Number type"); + assertEquals(GenerationMethod.EXTRACT, totalAmountDefResult.getMethod(), + "total_amount should use Extract method"); + assertFalse(totalAmountDefResult.getDescription().trim().isEmpty(), "total_amount should have description"); + System.out.println(" total_amount field verified (Number, Extract)"); + + // Verify document_summary field + assertTrue(result.getFieldSchema().getFields().containsKey("document_summary"), + "Should contain document_summary field"); + ContentFieldDefinition summaryDefResult = result.getFieldSchema().getFields().get("document_summary"); + assertEquals(ContentFieldType.STRING, summaryDefResult.getType(), "document_summary should be String type"); + assertEquals(GenerationMethod.GENERATE, summaryDefResult.getMethod(), + "document_summary should use Generate method"); + assertFalse(summaryDefResult.getDescription().trim().isEmpty(), "document_summary should have description"); + System.out.println(" document_summary field verified (String, Generate)"); + + // Verify document_type field + assertTrue(result.getFieldSchema().getFields().containsKey("document_type"), + "Should contain document_type field"); + ContentFieldDefinition documentTypeDefResult = result.getFieldSchema().getFields().get("document_type"); + assertEquals(ContentFieldType.STRING, documentTypeDefResult.getType(), "document_type should be String type"); + assertEquals(GenerationMethod.CLASSIFY, documentTypeDefResult.getMethod(), + "document_type should use Classify method"); + assertFalse(documentTypeDefResult.getDescription().trim().isEmpty(), "document_type should have description"); + assertNotNull(documentTypeDefResult.getEnumProperty(), "document_type should have enum values"); + assertEquals(5, documentTypeDefResult.getEnumProperty().size(), "document_type should have 5 enum values"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("invoice"), + "document_type enum should contain 'invoice'"); + 
assertTrue(documentTypeDefResult.getEnumProperty().contains("receipt"), + "document_type enum should contain 'receipt'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("contract"), + "document_type enum should contain 'contract'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("report"), + "document_type enum should contain 'report'"); + assertTrue(documentTypeDefResult.getEnumProperty().contains("other"), + "document_type enum should contain 'other'"); + System.out.println(" document_type field verified (String, Classify, 5 enum values)"); + + // Verify models + assertNotNull(result.getModels(), "Models should not be null"); + assertTrue(result.getModels().size() >= 2, "Should have at least 2 model mappings"); + assertTrue(result.getModels().containsKey("completion"), "Should contain 'completion' model mapping"); + assertTrue(result.getModels().containsKey("embedding"), "Should contain 'embedding' model mapping"); + assertEquals("gpt-4.1", result.getModels().get("completion"), "Completion model should be 'gpt-4.1'"); + assertEquals("text-embedding-3-large", result.getModels().get("embedding"), + "Embedding model should be 'text-embedding-3-large'"); + System.out.println("Model mappings verified: " + result.getModels().size() + " model(s)"); + + // Verify description + if (result.getDescription() != null && !result.getDescription().trim().isEmpty()) { + System.out.println("Analyzer description: " + result.getDescription()); + } + + System.out.println("All analyzer creation properties validated successfully"); + // END:Assertion_ContentUnderstandingCreateAnalyzerAsync + } + + @Test + public void testUseCustomAnalyzerAsync() { + // First create an analyzer + String analyzerId = testResourceNamer.randomName("test_analyzer_", 50); + + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameDef = new ContentFieldDefinition(); + companyNameDef.setType(ContentFieldType.STRING); + 
companyNameDef.setMethod(GenerationMethod.EXTRACT); + companyNameDef.setDescription("Name of the company"); + fields.put("company_name", companyNameDef); + + ContentFieldDefinition totalAmountDef = new ContentFieldDefinition(); + totalAmountDef.setType(ContentFieldType.NUMBER); + totalAmountDef.setMethod(GenerationMethod.EXTRACT); + totalAmountDef.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountDef); + + ContentFieldDefinition summaryDef = new ContentFieldDefinition(); + summaryDef.setType(ContentFieldType.STRING); + summaryDef.setMethod(GenerationMethod.GENERATE); + summaryDef.setDescription("A brief summary of the document content"); + fields.put("document_summary", summaryDef); + + ContentFieldDefinition documentTypeDef = new ContentFieldDefinition(); + documentTypeDef.setType(ContentFieldType.STRING); + documentTypeDef.setMethod(GenerationMethod.CLASSIFY); + documentTypeDef.setDescription("Type of document"); + documentTypeDef.setEnumProperty(Arrays.asList("invoice", "receipt", "contract", "report", "other")); + fields.put("document_type", documentTypeDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableFormula(true); + config.setEnableLayout(true); + config.setEnableOcr(true); + + ContentAnalyzer customAnalyzer = new ContentAnalyzer(); + customAnalyzer.setBaseAnalyzerId("prebuilt-document"); + customAnalyzer.setDescription("Custom analyzer for extracting company information"); + customAnalyzer.setConfig(config); + customAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + customAnalyzer.setModels(models); + + // Use reactive pattern: chain operations using flatMap + 
// In a real application, you would use subscribe() instead of block() + contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, customAnalyzer).last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + createdAnalyzerId = analyzerId; // Track for cleanup + + try { + // BEGIN:ContentUnderstandingUseCustomAnalyzerAsync + // Using a publicly accessible sample file from Azure-Samples GitHub repository + String documentUrl + = "https://raw.githubusercontent.com/Azure-Samples/azure-ai-content-understanding-dotnet/main/ContentUnderstanding.Common/data/invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + // Analyze a document using the custom analyzer + PollerFlux analyzeOperation + = contentUnderstandingAsyncClient.beginAnalyze(analyzerId, Arrays.asList(input)); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult analyzeResult = analyzeOperation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + // Extract custom fields from the result + // Since EstimateFieldSourceAndConfidence is enabled, we can access confidence scores and source information + if (analyzeResult.getContents() != null + && !analyzeResult.getContents().isEmpty() + && analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) 
analyzeResult.getContents().get(0); + + // Extract field (literal text extraction) + ContentField companyNameField + = content.getFields() != null ? content.getFields().get("company_name") : null; + if (companyNameField instanceof StringField) { + StringField sf = (StringField) companyNameField; + String companyName = sf.getValueString(); + System.out + .println("Company Name (extract): " + (companyName != null ? companyName : "(not found)")); + System.out.println(" Confidence: " + (companyNameField.getConfidence() != null + ? String.format("%.2f", companyNameField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (companyNameField.getSource() != null ? companyNameField.getSource() : "N/A")); + List spans = companyNameField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Extract field (literal text extraction) + ContentField totalAmountField + = content.getFields() != null ? content.getFields().get("total_amount") : null; + if (totalAmountField instanceof NumberField) { + NumberField nf = (NumberField) totalAmountField; + Double totalAmount = nf.getValueNumber(); + System.out.println("Total Amount (extract): " + + (totalAmount != null ? String.format("%.2f", totalAmount) : "(not found)")); + System.out.println(" Confidence: " + (totalAmountField.getConfidence() != null + ? String.format("%.2f", totalAmountField.getConfidence()) + : "N/A")); + System.out.println( + " Source: " + (totalAmountField.getSource() != null ? 
totalAmountField.getSource() : "N/A")); + List spans = totalAmountField.getSpans(); + if (spans != null && !spans.isEmpty()) { + ContentSpan span = spans.get(0); + System.out.println( + " Position in markdown: offset=" + span.getOffset() + ", length=" + span.getLength()); + } + } + + // Generate field (AI-generated value) + ContentField summaryField + = content.getFields() != null ? content.getFields().get("document_summary") : null; + if (summaryField instanceof StringField) { + StringField sf = (StringField) summaryField; + String summary = sf.getValueString(); + System.out.println("Document Summary (generate): " + (summary != null ? summary : "(not found)")); + System.out.println(" Confidence: " + (summaryField.getConfidence() != null + ? String.format("%.2f", summaryField.getConfidence()) + : "N/A")); + // Note: Generated fields may not have source information + if (summaryField.getSource() != null && !summaryField.getSource().isEmpty()) { + System.out.println(" Source: " + summaryField.getSource()); + } + } + + // Classify field (classification against predefined categories) + ContentField documentTypeField + = content.getFields() != null ? content.getFields().get("document_type") : null; + if (documentTypeField instanceof StringField) { + StringField sf = (StringField) documentTypeField; + String documentType = sf.getValueString(); + System.out + .println("Document Type (classify): " + (documentType != null ? documentType : "(not found)")); + System.out.println(" Confidence: " + (documentTypeField.getConfidence() != null + ? 
String.format("%.2f", documentTypeField.getConfidence()) + : "N/A")); + // Note: Classified fields may not have source information + if (documentTypeField.getSource() != null && !documentTypeField.getSource().isEmpty()) { + System.out.println(" Source: " + documentTypeField.getSource()); + } + } + } + // END:ContentUnderstandingUseCustomAnalyzerAsync + + // BEGIN:Assertion_ContentUnderstandingUseCustomAnalyzerAsync + assertNotNull(documentUrl, "Document URL should not be null"); + assertNotNull(analyzeOperation, "Analyze operation should not be null"); + assertNotNull(analyzeResult, "Analyze result should not be null"); + assertNotNull(analyzeResult.getContents(), "Result should contain contents"); + assertTrue(analyzeResult.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, analyzeResult.getContents().size(), "Result should have exactly one content element"); + System.out.println("Analyze operation properties verified"); + System.out.println("Analysis result contains " + analyzeResult.getContents().size() + " content(s)"); + + DocumentContent documentContent = analyzeResult.getContents().get(0) instanceof DocumentContent + ? (DocumentContent) analyzeResult.getContents().get(0) + : null; + assertNotNull(documentContent, "Content should be DocumentContent"); + assertNotNull(documentContent.getFields(), "Document content should have fields"); + System.out.println("Document content has custom fields"); + + // Verify company_name field (Extract method) + ContentField companyNameFieldAssert + = documentContent.getFields() != null ? 
documentContent.getFields().get("company_name") : null; + if (companyNameFieldAssert != null) { + System.out.println("company_name field found"); + assertTrue(companyNameFieldAssert instanceof StringField, "company_name should be a StringField"); + + if (companyNameFieldAssert instanceof StringField) { + StringField cnf = (StringField) companyNameFieldAssert; + if (cnf.getValueString() != null && !cnf.getValueString().trim().isEmpty()) { + System.out.println(" Value: " + cnf.getValueString()); + } + } + + if (companyNameFieldAssert.getConfidence() != null) { + assertTrue( + companyNameFieldAssert.getConfidence() >= 0 && companyNameFieldAssert.getConfidence() <= 1, + "company_name confidence should be between 0 and 1, but was " + + companyNameFieldAssert.getConfidence()); + System.out + .println(" Confidence: " + String.format("%.2f", companyNameFieldAssert.getConfidence())); + } + + if (companyNameFieldAssert.getSource() != null + && !companyNameFieldAssert.getSource().trim().isEmpty()) { + assertTrue(companyNameFieldAssert.getSource().startsWith("D("), + "Source should start with 'D(' for extracted fields"); + System.out.println(" Source: " + companyNameFieldAssert.getSource()); + } + + List spans = companyNameFieldAssert.getSpans(); + if (spans != null && !spans.isEmpty()) { + assertTrue(spans.size() > 0, "Spans should not be empty when not null"); + for (ContentSpan span : spans) { + assertTrue(span.getOffset() >= 0, "Span offset should be >= 0, but was " + span.getOffset()); + assertTrue(span.getLength() > 0, "Span length should be > 0, but was " + span.getLength()); + } + System.out.println(" Spans: " + spans.size() + " span(s)"); + } + } else { + System.out.println("⚠️ company_name field not found"); + } + + System.out.println("All custom analyzer usage properties validated successfully"); + // END:Assertion_ContentUnderstandingUseCustomAnalyzerAsync + } finally { + // Cleanup is handled by @AfterEach + } + } +} diff --git 
a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifier.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifier.java new file mode 100644 index 000000000000..6faa23ae26ea --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifier.java @@ -0,0 +1,181 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentCategoryDefinition; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Sample demonstrating how to create a classifier analyzer. + * + * This sample shows how to create a classifier that categorizes documents into predefined + * custom categories using ContentCategories. 
Classifiers are useful for: + * - Content organization: Organize large document collections by type through categorization + * - Data routing (optional): Route data to specific custom analyzers based on category + * - Multi-document processing: Process files containing multiple document types by automatically + * segmenting them + */ +public class Sample05_CreateClassifier extends ContentUnderstandingClientTestBase { + + private String createdAnalyzerId; + + @AfterEach + public void cleanup() { + if (createdAnalyzerId != null) { + try { + contentUnderstandingClient.deleteAnalyzer(createdAnalyzerId); + System.out.println("Classifier analyzer '" + createdAnalyzerId + "' deleted successfully."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testCreateClassifier() { + + // BEGIN:ContentUnderstandingCreateClassifier + // Generate a unique classifier analyzer ID + String analyzerId = testResourceNamer.randomName("document_classifier_", 50); + + System.out.println("Creating classifier analyzer '" + analyzerId + "'..."); + + // Define content categories for classification + // Each category has a description that helps the AI model understand what documents belong to it + Map categories = new HashMap<>(); + + categories.put("Loan_Application", + new ContentCategoryDefinition() + .setDescription("Documents submitted by individuals or businesses to request funding, " + + "typically including personal or business details, financial history, loan amount, " + + "purpose, and supporting documentation.")); + + categories.put("Invoice", + new ContentCategoryDefinition() + .setDescription("Billing documents issued by sellers or service providers to request payment " + + "for goods or services, detailing items, prices, taxes, totals, and payment terms.")); + + categories.put("Bank_Statement", + new ContentCategoryDefinition() + .setDescription("Official statements issued by banks that summarize account activity over a period, " + + 
"including deposits, withdrawals, fees, and balances.")); + + // Create analyzer configuration with content categories + ContentAnalyzerConfig config = new ContentAnalyzerConfig().setReturnDetails(true) + .setEnableSegment(true) // Enable automatic segmentation by category + .setContentCategories(categories); + + // Create the classifier analyzer + // Note: models are specified using model names, not deployment names + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + + ContentAnalyzer classifier = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom classifier for financial document categorization") + .setConfig(config) + .setModels(models); + + // Create the classifier + SyncPoller operation + = contentUnderstandingClient.beginCreateAnalyzer(analyzerId, classifier, true); + + ContentAnalyzer result = operation.getFinalResult(); + System.out.println("Classifier '" + analyzerId + "' created successfully!"); + // END:ContentUnderstandingCreateClassifier + + createdAnalyzerId = analyzerId; // Track for cleanup + + // BEGIN:Assertion_ContentUnderstandingCreateClassifier + // Verify basic properties + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(operation, "Create analyzer operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("✓ Create classifier operation completed successfully"); + + assertNotNull(result, "Analyzer result should not be null"); + System.out.println("✓ Classifier analyzer created: " + analyzerId); + + // Verify base analyzer + assertNotNull(result.getBaseAnalyzerId(), "Base analyzer ID should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId(), "Base analyzer ID should match"); + System.out.println("✓ Base analyzer ID verified: " + result.getBaseAnalyzerId()); + + 
// Verify description + assertNotNull(result.getDescription(), "Description should not be null"); + assertEquals("Custom classifier for financial document categorization", result.getDescription(), + "Description should match"); + System.out.println("✓ Description verified: " + result.getDescription()); + + // Verify analyzer config + assertNotNull(result.getConfig(), "Analyzer config should not be null"); + System.out.println("✓ Analyzer config present"); + + // Verify content categories + assertNotNull(result.getConfig().getContentCategories(), "Content categories should not be null"); + assertEquals(3, result.getConfig().getContentCategories().size(), "Should have 3 content categories"); + System.out.println("✓ Content categories count verified: " + result.getConfig().getContentCategories().size()); + + // Verify Loan_Application category + assertTrue(result.getConfig().getContentCategories().containsKey("Loan_Application"), + "Should contain Loan_Application category"); + ContentCategoryDefinition loanAppCategory = result.getConfig().getContentCategories().get("Loan_Application"); + assertNotNull(loanAppCategory.getDescription(), "Loan_Application description should not be null"); + assertTrue(loanAppCategory.getDescription().contains("funding"), + "Loan_Application description should mention funding"); + System.out.println(" ✓ Loan_Application category verified"); + + // Verify Invoice category + assertTrue(result.getConfig().getContentCategories().containsKey("Invoice"), "Should contain Invoice category"); + ContentCategoryDefinition invoiceCategory = result.getConfig().getContentCategories().get("Invoice"); + assertNotNull(invoiceCategory.getDescription(), "Invoice description should not be null"); + assertTrue(invoiceCategory.getDescription().contains("payment"), "Invoice description should mention payment"); + System.out.println(" ✓ Invoice category verified"); + + // Verify Bank_Statement category + 
assertTrue(result.getConfig().getContentCategories().containsKey("Bank_Statement"), + "Should contain Bank_Statement category"); + ContentCategoryDefinition bankCategory = result.getConfig().getContentCategories().get("Bank_Statement"); + assertNotNull(bankCategory.getDescription(), "Bank_Statement description should not be null"); + assertTrue(bankCategory.getDescription().contains("account activity"), + "Bank_Statement description should mention account activity"); + System.out.println(" ✓ Bank_Statement category verified"); + + // Verify enableSegment is set + assertNotNull(result.getConfig().isEnableSegment(), "EnableSegment should not be null"); + assertTrue(result.getConfig().isEnableSegment(), "EnableSegment should be true"); + System.out.println("✓ EnableSegment verified: " + result.getConfig().isEnableSegment()); + + // Verify returnDetails is set + assertNotNull(result.getConfig().isReturnDetails(), "ReturnDetails should not be null"); + assertTrue(result.getConfig().isReturnDetails(), "ReturnDetails should be true"); + System.out.println("✓ ReturnDetails verified: " + result.getConfig().isReturnDetails()); + + // Verify models + assertNotNull(result.getModels(), "Models should not be null"); + assertTrue(result.getModels().containsKey("completion"), "Should contain 'completion' model mapping"); + System.out.println("✓ Model mappings verified: " + result.getModels().size() + " model(s)"); + + System.out.println("\n════════════════════════════════════════════════════════════"); + System.out.println("✓ CLASSIFIER CREATION VERIFIED SUCCESSFULLY"); + System.out.println("════════════════════════════════════════════════════════════"); + System.out.println(" Analyzer ID: " + analyzerId); + System.out.println(" Base Analyzer: " + result.getBaseAnalyzerId()); + System.out.println(" Categories: " + result.getConfig().getContentCategories().size()); + System.out.println(" Segmentation: " + result.getConfig().isEnableSegment()); + 
System.out.println("════════════════════════════════════════════════════════════"); + // END:Assertion_ContentUnderstandingCreateClassifier + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifierAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifierAsync.java new file mode 100644 index 000000000000..723283c1af21 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample05_CreateClassifierAsync.java @@ -0,0 +1,190 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentCategoryDefinition; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Async sample demonstrating how to create a classifier analyzer. + * + * This sample shows how to create a classifier that categorizes documents into predefined + * custom categories using ContentCategories asynchronously. 
Classifiers are useful for: + * - Content organization: Organize large document collections by type through categorization + * - Data routing (optional): Route data to specific custom analyzers based on category + * - Multi-document processing: Process files containing multiple document types by automatically + * segmenting them + */ +public class Sample05_CreateClassifierAsync extends ContentUnderstandingClientTestBase { + + private String createdAnalyzerId; + + @AfterEach + public void cleanup() { + if (createdAnalyzerId != null) { + try { + contentUnderstandingAsyncClient.deleteAnalyzer(createdAnalyzerId).block(); + System.out.println("Classifier analyzer '" + createdAnalyzerId + "' deleted successfully."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testCreateClassifierAsync() { + + // BEGIN:ContentUnderstandingCreateClassifierAsync + // Generate a unique classifier analyzer ID + String analyzerId = testResourceNamer.randomName("document_classifier_", 50); + + System.out.println("Creating classifier analyzer '" + analyzerId + "'..."); + + // Define content categories for classification + // Each category has a description that helps the AI model understand what documents belong to it + Map<String, ContentCategoryDefinition> categories = new HashMap<>(); + + categories.put("Loan_Application", + new ContentCategoryDefinition() + .setDescription("Documents submitted by individuals or businesses to request funding, " + + "typically including personal or business details, financial history, loan amount, " + + "purpose, and supporting documentation.")); + + categories.put("Invoice", + new ContentCategoryDefinition() + .setDescription("Billing documents issued by sellers or service providers to request payment " + + "for goods or services, detailing items, prices, taxes, totals, and payment terms.")); + + categories.put("Bank_Statement", + new ContentCategoryDefinition() + .setDescription("Official statements issued by banks that summarize account activity 
over a period, " + + "including deposits, withdrawals, fees, and balances.")); + + // Create analyzer configuration with content categories + ContentAnalyzerConfig config = new ContentAnalyzerConfig().setReturnDetails(true) + .setEnableSegment(true) // Enable automatic segmentation by category + .setContentCategories(categories); + + // Create the classifier analyzer + // Note: models are specified using model names, not deployment names + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + + ContentAnalyzer classifier = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Custom classifier for financial document categorization") + .setConfig(config) + .setModels(models); + + // Create the classifier + PollerFlux operation + = contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, classifier, true); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + ContentAnalyzer result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Classifier '" + analyzerId + "' created successfully!"); + // END:ContentUnderstandingCreateClassifierAsync + + createdAnalyzerId = analyzerId; // Track for cleanup + + // BEGIN:Assertion_ContentUnderstandingCreateClassifierAsync + // Verify basic properties + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(operation, "Create analyzer operation should not be null"); + assertNotNull(result, "Analyzer result should not be null"); + System.out.println("✓ Create classifier operation 
completed successfully"); + System.out.println("✓ Classifier analyzer created: " + analyzerId); + + // Verify base analyzer + assertNotNull(result.getBaseAnalyzerId(), "Base analyzer ID should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId(), "Base analyzer ID should match"); + System.out.println("✓ Base analyzer ID verified: " + result.getBaseAnalyzerId()); + + // Verify description + assertNotNull(result.getDescription(), "Description should not be null"); + assertEquals("Custom classifier for financial document categorization", result.getDescription(), + "Description should match"); + System.out.println("✓ Description verified: " + result.getDescription()); + + // Verify analyzer config + assertNotNull(result.getConfig(), "Analyzer config should not be null"); + System.out.println("✓ Analyzer config present"); + + // Verify content categories + assertNotNull(result.getConfig().getContentCategories(), "Content categories should not be null"); + assertEquals(3, result.getConfig().getContentCategories().size(), "Should have 3 content categories"); + System.out.println("✓ Content categories count verified: " + result.getConfig().getContentCategories().size()); + + // Verify Loan_Application category + assertTrue(result.getConfig().getContentCategories().containsKey("Loan_Application"), + "Should contain Loan_Application category"); + ContentCategoryDefinition loanAppCategory = result.getConfig().getContentCategories().get("Loan_Application"); + assertNotNull(loanAppCategory.getDescription(), "Loan_Application description should not be null"); + assertTrue(loanAppCategory.getDescription().contains("funding"), + "Loan_Application description should mention funding"); + System.out.println(" ✓ Loan_Application category verified"); + + // Verify Invoice category + assertTrue(result.getConfig().getContentCategories().containsKey("Invoice"), "Should contain Invoice category"); + ContentCategoryDefinition invoiceCategory = 
result.getConfig().getContentCategories().get("Invoice"); + assertNotNull(invoiceCategory.getDescription(), "Invoice description should not be null"); + assertTrue(invoiceCategory.getDescription().contains("payment"), "Invoice description should mention payment"); + System.out.println(" ✓ Invoice category verified"); + + // Verify Bank_Statement category + assertTrue(result.getConfig().getContentCategories().containsKey("Bank_Statement"), + "Should contain Bank_Statement category"); + ContentCategoryDefinition bankCategory = result.getConfig().getContentCategories().get("Bank_Statement"); + assertNotNull(bankCategory.getDescription(), "Bank_Statement description should not be null"); + assertTrue(bankCategory.getDescription().contains("account activity"), + "Bank_Statement description should mention account activity"); + System.out.println(" ✓ Bank_Statement category verified"); + + // Verify enableSegment is set + assertNotNull(result.getConfig().isEnableSegment(), "EnableSegment should not be null"); + assertTrue(result.getConfig().isEnableSegment(), "EnableSegment should be true"); + System.out.println("✓ EnableSegment verified: " + result.getConfig().isEnableSegment()); + + // Verify returnDetails is set + assertNotNull(result.getConfig().isReturnDetails(), "ReturnDetails should not be null"); + assertTrue(result.getConfig().isReturnDetails(), "ReturnDetails should be true"); + System.out.println("✓ ReturnDetails verified: " + result.getConfig().isReturnDetails()); + + // Verify models + assertNotNull(result.getModels(), "Models should not be null"); + assertTrue(result.getModels().containsKey("completion"), "Should contain 'completion' model mapping"); + System.out.println("✓ Model mappings verified: " + result.getModels().size() + " model(s)"); + + System.out.println("\n════════════════════════════════════════════════════════════"); + System.out.println("✓ CLASSIFIER CREATION VERIFIED SUCCESSFULLY"); + 
System.out.println("════════════════════════════════════════════════════════════"); + System.out.println(" Analyzer ID: " + analyzerId); + System.out.println(" Base Analyzer: " + result.getBaseAnalyzerId()); + System.out.println(" Categories: " + result.getConfig().getContentCategories().size()); + System.out.println(" Segmentation: " + result.getConfig().isEnableSegment()); + System.out.println("════════════════════════════════════════════════════════════"); + // END:Assertion_ContentUnderstandingCreateClassifierAsync + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzer.java new file mode 100644 index 000000000000..3a18a435edc4 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzer.java @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrating how to get analyzer information. + * This sample shows: + * 1. Retrieving analyzer details by ID + * 2. Accessing analyzer configuration + * 3. Inspecting field schema definitions + * 4. 
Getting prebuilt analyzer information + */ +public class Sample06_GetAnalyzer extends ContentUnderstandingClientTestBase { + + @Test + public void testGetAnalyzer() { + + // BEGIN:ContentUnderstandingGetAnalyzer + // Get a prebuilt analyzer (these are always available) + String analyzerId = "prebuilt-invoice"; + + ContentAnalyzer analyzer = contentUnderstandingClient.getAnalyzer(analyzerId); + + System.out.println("Analyzer ID: " + analyzer.getAnalyzerId()); + System.out.println( + "Base Analyzer ID: " + (analyzer.getBaseAnalyzerId() != null ? analyzer.getBaseAnalyzerId() : "N/A")); + System.out.println("Description: " + (analyzer.getDescription() != null ? analyzer.getDescription() : "N/A")); + + // Display configuration + if (analyzer.getConfig() != null) { + System.out.println("\nAnalyzer Configuration:"); + System.out.println(" Enable OCR: " + analyzer.getConfig().isEnableOcr()); + System.out.println(" Enable Layout: " + analyzer.getConfig().isEnableLayout()); + System.out.println(" Enable Formula: " + analyzer.getConfig().isEnableFormula()); + System.out.println( + " Estimate Field Source and Confidence: " + analyzer.getConfig().isEstimateFieldSourceAndConfidence()); + System.out.println(" Return Details: " + analyzer.getConfig().isReturnDetails()); + } + + // Display field schema if available + if (analyzer.getFieldSchema() != null) { + System.out.println("\nField Schema:"); + System.out.println(" Name: " + analyzer.getFieldSchema().getName()); + System.out.println(" Description: " + (analyzer.getFieldSchema().getDescription() != null + ? 
analyzer.getFieldSchema().getDescription() + : "N/A")); + if (analyzer.getFieldSchema().getFields() != null) { + System.out.println(" Number of fields: " + analyzer.getFieldSchema().getFields().size()); + System.out.println(" Fields:"); + analyzer.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + System.out.println(" - " + fieldName + " (" + fieldDef.getType() + ", Method: " + + (fieldDef.getMethod() != null ? fieldDef.getMethod() : "N/A") + ")"); + if (fieldDef.getDescription() != null && !fieldDef.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + fieldDef.getDescription()); + } + }); + } + } + + // Display models if available + if (analyzer.getModels() != null && !analyzer.getModels().isEmpty()) { + System.out.println("\nModel Mappings:"); + analyzer.getModels().forEach((modelKey, modelValue) -> { + System.out.println(" " + modelKey + ": " + modelValue); + }); + } + + // Display status if available + if (analyzer.getStatus() != null) { + System.out.println("\nAnalyzer Status: " + analyzer.getStatus()); + } + + // Display created/updated timestamps if available + if (analyzer.getCreatedAt() != null) { + System.out.println("Created: " + analyzer.getCreatedAt()); + } + if (analyzer.getLastModifiedAt() != null) { + System.out.println("Updated: " + analyzer.getLastModifiedAt()); + } + // END:ContentUnderstandingGetAnalyzer + + // BEGIN:Assertion_ContentUnderstandingGetAnalyzer + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertNotNull(analyzer, "Analyzer should not be null"); + System.out.println("\nAnalyzer retrieved successfully"); + + // Verify analyzer ID + assertNotNull(analyzer.getAnalyzerId(), "Analyzer ID should not be null"); + assertEquals(analyzerId, analyzer.getAnalyzerId(), "Analyzer ID should match requested ID"); + System.out.println("Analyzer ID verified: " + analyzer.getAnalyzerId()); + + // Verify analyzer has configuration + assertNotNull(analyzer.getConfig(), "Analyzer config should not 
be null"); + System.out.println("Analyzer configuration verified"); + + // For prebuilt analyzers, verify they have field schema + if (analyzer.getFieldSchema() != null) { + assertNotNull(analyzer.getFieldSchema().getName(), "Field schema name should not be null"); + assertFalse(analyzer.getFieldSchema().getName().trim().isEmpty(), "Field schema name should not be empty"); + System.out.println("Field schema verified: " + analyzer.getFieldSchema().getName()); + + if (analyzer.getFieldSchema().getFields() != null) { + assertTrue(analyzer.getFieldSchema().getFields().size() > 0, + "Field schema should have at least one field"); + System.out.println("Field schema contains " + analyzer.getFieldSchema().getFields().size() + " fields"); + } + } + + System.out.println("All analyzer properties validated successfully"); + // END:Assertion_ContentUnderstandingGetAnalyzer + } + + @Test + public void testGetAnalyzerNotFound() { + // Test getting another prebuilt analyzer + String analyzerId = "prebuilt-document"; + + ContentAnalyzer analyzer = contentUnderstandingClient.getAnalyzer(analyzerId); + + System.out.println("\nRetrieving prebuilt-document analyzer..."); + System.out.println("Analyzer ID: " + analyzer.getAnalyzerId()); + System.out.println("Description: " + (analyzer.getDescription() != null ? 
analyzer.getDescription() : "N/A")); + + // Verify the analyzer + assertNotNull(analyzer, "Analyzer should not be null"); + assertEquals(analyzerId, analyzer.getAnalyzerId(), "Analyzer ID should match"); + assertNotNull(analyzer.getConfig(), "Analyzer config should not be null"); + System.out.println("Prebuilt-document analyzer verified successfully"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzerAsync.java new file mode 100644 index 000000000000..241b0c087823 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample06_GetAnalyzerAsync.java @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrating how to get analyzer information. + * This sample shows: + * 1. Retrieving analyzer details by ID + * 2. Accessing analyzer configuration + * 3. Inspecting field schema definitions + * 4. 
Getting prebuilt analyzer information + */ +public class Sample06_GetAnalyzerAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testGetAnalyzerAsync() { + + // BEGIN:ContentUnderstandingGetAnalyzerAsync + // Get a prebuilt analyzer (these are always available) + String analyzerId = "prebuilt-invoice"; + + ContentAnalyzer analyzer = contentUnderstandingAsyncClient.getAnalyzer(analyzerId).block(); + + System.out.println("Analyzer ID: " + analyzer.getAnalyzerId()); + System.out.println( + "Base Analyzer ID: " + (analyzer.getBaseAnalyzerId() != null ? analyzer.getBaseAnalyzerId() : "N/A")); + System.out.println("Description: " + (analyzer.getDescription() != null ? analyzer.getDescription() : "N/A")); + + // Display configuration + if (analyzer.getConfig() != null) { + System.out.println("\nAnalyzer Configuration:"); + System.out.println(" Enable OCR: " + analyzer.getConfig().isEnableOcr()); + System.out.println(" Enable Layout: " + analyzer.getConfig().isEnableLayout()); + System.out.println(" Enable Formula: " + analyzer.getConfig().isEnableFormula()); + System.out.println( + " Estimate Field Source and Confidence: " + analyzer.getConfig().isEstimateFieldSourceAndConfidence()); + System.out.println(" Return Details: " + analyzer.getConfig().isReturnDetails()); + } + + // Display field schema if available + if (analyzer.getFieldSchema() != null) { + System.out.println("\nField Schema:"); + System.out.println(" Name: " + analyzer.getFieldSchema().getName()); + System.out.println(" Description: " + (analyzer.getFieldSchema().getDescription() != null + ? 
analyzer.getFieldSchema().getDescription() + : "N/A")); + if (analyzer.getFieldSchema().getFields() != null) { + System.out.println(" Number of fields: " + analyzer.getFieldSchema().getFields().size()); + System.out.println(" Fields:"); + analyzer.getFieldSchema().getFields().forEach((fieldName, fieldDef) -> { + System.out.println(" - " + fieldName + " (" + fieldDef.getType() + ", Method: " + + (fieldDef.getMethod() != null ? fieldDef.getMethod() : "N/A") + ")"); + if (fieldDef.getDescription() != null && !fieldDef.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + fieldDef.getDescription()); + } + }); + } + } + + // Display models if available + if (analyzer.getModels() != null && !analyzer.getModels().isEmpty()) { + System.out.println("\nModel Mappings:"); + analyzer.getModels().forEach((modelKey, modelValue) -> { + System.out.println(" " + modelKey + ": " + modelValue); + }); + } + + // Display status if available + if (analyzer.getStatus() != null) { + System.out.println("\nAnalyzer Status: " + analyzer.getStatus()); + } + + // Display created/updated timestamps if available + if (analyzer.getCreatedAt() != null) { + System.out.println("Created: " + analyzer.getCreatedAt()); + } + if (analyzer.getLastModifiedAt() != null) { + System.out.println("Updated: " + analyzer.getLastModifiedAt()); + } + // END:ContentUnderstandingGetAnalyzerAsync + + // BEGIN:Assertion_ContentUnderstandingGetAnalyzerAsync + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertNotNull(analyzer, "Analyzer should not be null"); + System.out.println("\nAnalyzer retrieved successfully"); + + // Verify analyzer ID + assertNotNull(analyzer.getAnalyzerId(), "Analyzer ID should not be null"); + assertEquals(analyzerId, analyzer.getAnalyzerId(), "Analyzer ID should match requested ID"); + System.out.println("Analyzer ID verified: " + analyzer.getAnalyzerId()); + + // Verify analyzer has configuration + assertNotNull(analyzer.getConfig(), "Analyzer config 
should not be null"); + assertNotNull(analyzer.getConfig(), "Analyzer config should not be null"); + System.out.println("Analyzer configuration verified"); + + // For prebuilt analyzers, verify they have field schema + if (analyzer.getFieldSchema() != null) { + assertNotNull(analyzer.getFieldSchema().getName(), "Field schema name should not be null"); + assertFalse(analyzer.getFieldSchema().getName().trim().isEmpty(), "Field schema name should not be empty"); + System.out.println("Field schema verified: " + analyzer.getFieldSchema().getName()); + + if (analyzer.getFieldSchema().getFields() != null) { + assertTrue(analyzer.getFieldSchema().getFields().size() > 0, + "Field schema should have at least one field"); + System.out.println("Field schema contains " + analyzer.getFieldSchema().getFields().size() + " fields"); + } + } + + System.out.println("All analyzer properties validated successfully"); + // END:Assertion_ContentUnderstandingGetAnalyzerAsync + } + + @Test + public void testGetAnalyzerNotFoundAsync() { + // Test getting another prebuilt analyzer + String analyzerId = "prebuilt-document"; + + ContentAnalyzer analyzer = contentUnderstandingAsyncClient.getAnalyzer(analyzerId).block(); + + System.out.println("\nRetrieving prebuilt-document analyzer..."); + System.out.println("Analyzer ID: " + analyzer.getAnalyzerId()); + System.out.println("Description: " + (analyzer.getDescription() != null ? 
analyzer.getDescription() : "N/A")); + + // Verify the analyzer + assertNotNull(analyzer, "Analyzer should not be null"); + assertEquals(analyzerId, analyzer.getAnalyzerId(), "Analyzer ID should match"); + assertNotNull(analyzer.getConfig(), "Analyzer config should not be null"); + System.out.println("Prebuilt-document analyzer verified successfully"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzers.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzers.java new file mode 100644 index 000000000000..aea764306956 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzers.java @@ -0,0 +1,147 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.core.http.rest.PagedIterable; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrating how to list all analyzers. + * This sample shows: + * 1. Listing all available analyzers (both prebuilt and custom) + * 2. Filtering analyzers by status + * 3. Iterating through paginated results + * 4. 
Displaying analyzer properties + */ +public class Sample07_ListAnalyzers extends ContentUnderstandingClientTestBase { + + @Test + public void testListAnalyzers() { + + // BEGIN:ContentUnderstandingListAnalyzers + // List all analyzers + PagedIterable<ContentAnalyzer> analyzers = contentUnderstandingClient.listAnalyzers(); + + System.out.println("Listing all analyzers:"); + System.out.println("======================"); + + int count = 0; + int prebuiltCount = 0; + int customCount = 0; + + for (ContentAnalyzer analyzer : analyzers) { + count++; + + // Determine if this is a prebuilt or custom analyzer + boolean isPrebuilt = analyzer.getAnalyzerId().startsWith("prebuilt-"); + if (isPrebuilt) { + prebuiltCount++; + } else { + customCount++; + } + + System.out.println("\nAnalyzer #" + count + ":"); + System.out.println(" ID: " + analyzer.getAnalyzerId()); + System.out.println(" Type: " + (isPrebuilt ? "Prebuilt" : "Custom")); + + if (analyzer.getDescription() != null && !analyzer.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + analyzer.getDescription()); + } + + if (analyzer.getBaseAnalyzerId() != null) { + System.out.println(" Base Analyzer: " + analyzer.getBaseAnalyzerId()); + } + + if (analyzer.getStatus() != null) { + System.out.println(" Status: " + analyzer.getStatus()); + } + + if (analyzer.getCreatedAt() != null) { + System.out.println(" Created: " + analyzer.getCreatedAt()); + } + + if (analyzer.getLastModifiedAt() != null) { + System.out.println(" Last Modified: " + analyzer.getLastModifiedAt()); + } + + // Display field schema summary if available + if (analyzer.getFieldSchema() != null && analyzer.getFieldSchema().getFields() != null) { + System.out.println(" Fields: " + analyzer.getFieldSchema().getFields().size() + " field(s) defined"); + } + + // Display tags if available + if (analyzer.getTags() != null && !analyzer.getTags().isEmpty()) { + System.out.println(" Tags: " + analyzer.getTags().size() + " tag(s)"); + } + } + + 
System.out.println("\n======================"); + System.out.println("Total analyzers: " + count); + System.out.println(" Prebuilt: " + prebuiltCount); + System.out.println(" Custom: " + customCount); + // END:ContentUnderstandingListAnalyzers + + // BEGIN:Assertion_ContentUnderstandingListAnalyzers + assertNotNull(analyzers, "Analyzers list should not be null"); + System.out.println("\nAnalyzers list retrieved successfully"); + + // Verify we have at least the prebuilt analyzers + assertTrue(count > 0, "Should have at least one analyzer"); + assertTrue(prebuiltCount > 0, "Should have at least one prebuilt analyzer"); + System.out.println("Verified: Found " + count + " total analyzer(s)"); + System.out.println("Verified: Found " + prebuiltCount + " prebuilt analyzer(s)"); + if (customCount > 0) { + System.out.println("Verified: Found " + customCount + " custom analyzer(s)"); + } + + // Verify each analyzer has required properties + int validatedCount = 0; + for (ContentAnalyzer analyzer : analyzers) { + assertNotNull(analyzer.getAnalyzerId(), "Analyzer ID should not be null"); + assertFalse(analyzer.getAnalyzerId().trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(analyzer.getStatus(), "Analyzer status should not be null"); + validatedCount++; + + // Only validate first few to avoid excessive output + if (validatedCount >= 5) { + break; + } + } + + System.out.println("All analyzer list properties validated successfully"); + // END:Assertion_ContentUnderstandingListAnalyzers + } + + @Test + public void testListAnalyzersWithMaxResults() { + // List all analyzers and filter for ready ones + PagedIterable analyzers = contentUnderstandingClient.listAnalyzers(); + + System.out.println("\nListing ready analyzers:"); + System.out.println("========================"); + + int readyCount = 0; + for (ContentAnalyzer analyzer : analyzers) { + if (analyzer.getStatus() != null && "ready".equalsIgnoreCase(analyzer.getStatus().toString())) { + readyCount++; + 
System.out.println("\nReady Analyzer #" + readyCount + ":"); + System.out.println(" ID: " + analyzer.getAnalyzerId()); + if (analyzer.getDescription() != null) { + System.out.println(" Description: " + analyzer.getDescription()); + } + } + } + + System.out.println("\n========================"); + System.out.println("Total ready analyzers: " + readyCount); + + // Verify + assertTrue(readyCount > 0, "Should have at least one ready analyzer"); + System.out.println("Verified: Found " + readyCount + " ready analyzer(s)"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzersAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzersAsync.java new file mode 100644 index 000000000000..6e0c6eb32957 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample07_ListAnalyzersAsync.java @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.core.http.rest.PagedFlux; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrating how to list all analyzers. + * This sample shows: + * 1. Listing all available analyzers (both prebuilt and custom) + * 2. Filtering analyzers by status + * 3. Iterating through paginated results + * 4. 
Displaying analyzer properties + */ +public class Sample07_ListAnalyzersAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testListAnalyzersAsync() { + + // BEGIN:ContentUnderstandingListAnalyzersAsync + // List all analyzers + PagedFlux analyzers = contentUnderstandingAsyncClient.listAnalyzers(); + + System.out.println("Listing all analyzers:"); + System.out.println("======================"); + + final int[] count = { 0 }; + final int[] prebuiltCount = { 0 }; + final int[] customCount = { 0 }; + + analyzers.toIterable().forEach(analyzer -> { + count[0]++; + + // Determine if this is a prebuilt or custom analyzer + boolean isPrebuilt = analyzer.getAnalyzerId().startsWith("prebuilt-"); + if (isPrebuilt) { + prebuiltCount[0]++; + } else { + customCount[0]++; + } + + System.out.println("\nAnalyzer #" + count[0] + ":"); + System.out.println(" ID: " + analyzer.getAnalyzerId()); + System.out.println(" Type: " + (isPrebuilt ? "Prebuilt" : "Custom")); + + if (analyzer.getDescription() != null && !analyzer.getDescription().trim().isEmpty()) { + System.out.println(" Description: " + analyzer.getDescription()); + } + + if (analyzer.getBaseAnalyzerId() != null) { + System.out.println(" Base Analyzer: " + analyzer.getBaseAnalyzerId()); + } + + if (analyzer.getStatus() != null) { + System.out.println(" Status: " + analyzer.getStatus()); + } + + if (analyzer.getCreatedAt() != null) { + System.out.println(" Created: " + analyzer.getCreatedAt()); + } + + if (analyzer.getLastModifiedAt() != null) { + System.out.println(" Last Modified: " + analyzer.getLastModifiedAt()); + } + + // Display field schema summary if available + if (analyzer.getFieldSchema() != null && analyzer.getFieldSchema().getFields() != null) { + System.out.println(" Fields: " + analyzer.getFieldSchema().getFields().size() + " field(s) defined"); + } + + // Display tags if available + if (analyzer.getTags() != null && !analyzer.getTags().isEmpty()) { + System.out.println(" Tags: " + 
analyzer.getTags().size() + " tag(s)"); + } + }); + + System.out.println("\n======================"); + System.out.println("Total analyzers: " + count[0]); + System.out.println(" Prebuilt: " + prebuiltCount[0]); + System.out.println(" Custom: " + customCount[0]); + // END:ContentUnderstandingListAnalyzersAsync + + // BEGIN:Assertion_ContentUnderstandingListAnalyzersAsync + assertNotNull(analyzers, "Analyzers list should not be null"); + System.out.println("\nAnalyzers list retrieved successfully"); + + // Verify we have at least the prebuilt analyzers + assertTrue(count[0] > 0, "Should have at least one analyzer"); + assertTrue(prebuiltCount[0] > 0, "Should have at least one prebuilt analyzer"); + System.out.println("Verified: Found " + count[0] + " total analyzer(s)"); + System.out.println("Verified: Found " + prebuiltCount[0] + " prebuilt analyzer(s)"); + if (customCount[0] > 0) { + System.out.println("Verified: Found " + customCount[0] + " custom analyzer(s)"); + } + + // Verify each analyzer has required properties + final int[] validatedCount = { 0 }; + analyzers.toIterable().forEach(analyzer -> { + if (validatedCount[0] < 5) { + assertNotNull(analyzer.getAnalyzerId(), "Analyzer ID should not be null"); + assertFalse(analyzer.getAnalyzerId().trim().isEmpty(), "Analyzer ID should not be empty"); + assertNotNull(analyzer.getStatus(), "Analyzer status should not be null"); + validatedCount[0]++; + } + }); + + System.out.println("All analyzer list properties validated successfully"); + // END:Assertion_ContentUnderstandingListAnalyzersAsync + } + + @Test + public void testListAnalyzersWithMaxResultsAsync() { + // List all analyzers and filter for ready ones + PagedFlux analyzers = contentUnderstandingAsyncClient.listAnalyzers(); + + System.out.println("\nListing ready analyzers:"); + System.out.println("========================"); + + final int[] readyCount = { 0 }; + analyzers.toIterable().forEach(analyzer -> { + if (analyzer.getStatus() != null && 
"ready".equalsIgnoreCase(analyzer.getStatus().toString())) { + readyCount[0]++; + System.out.println("\nReady Analyzer #" + readyCount[0] + ":"); + System.out.println(" ID: " + analyzer.getAnalyzerId()); + if (analyzer.getDescription() != null) { + System.out.println(" Description: " + analyzer.getDescription()); + } + } + }); + + System.out.println("\n========================"); + System.out.println("Total ready analyzers: " + readyCount[0]); + + // Verify + assertTrue(readyCount[0] > 0, "Should have at least one ready analyzer"); + System.out.println("Verified: Found " + readyCount[0] + " ready analyzer(s)"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzer.java new file mode 100644 index 000000000000..f43357090f5d --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzer.java @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Sample demonstrating how to update an existing analyzer. + * This sample shows: + * 1. Creating an analyzer + * 2. Updating analyzer description + * 3. Updating analyzer configuration + * 4. 
Updating field schema + */ +public class Sample08_UpdateAnalyzer extends ContentUnderstandingClientTestBase { + + private String analyzerId; + + @BeforeEach + public void setup() { + // Create an analyzer for testing + analyzerId = testResourceNamer.randomName("update_test_analyzer_", 50); + + Map fields = new HashMap<>(); + ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + fields.put("title", titleDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("basic_schema"); + fieldSchema.setDescription("Basic document schema"); + fieldSchema.setFields(fields); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Original analyzer for update testing") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + contentUnderstandingClient.beginCreateAnalyzer(analyzerId, analyzer).getFinalResult(); + System.out.println("Test analyzer created: " + analyzerId); + } + + @AfterEach + public void cleanup() { + if (analyzerId != null) { + try { + contentUnderstandingClient.deleteAnalyzer(analyzerId); + System.out.println("Test analyzer deleted: " + analyzerId); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testUpdateAnalyzer() { + // BEGIN:ContentUnderstandingUpdateAnalyzer + // Get the current analyzer + ContentAnalyzer currentAnalyzer = contentUnderstandingClient.getAnalyzer(analyzerId); + System.out.println("Current description: " + currentAnalyzer.getDescription()); + + // Update the analyzer with new configuration + Map updatedFields = new HashMap<>(); + + // Keep the original field + 
ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + updatedFields.put("title", titleDef); + + // Add a new field + ContentFieldDefinition authorDef = new ContentFieldDefinition(); + authorDef.setType(ContentFieldType.STRING); + authorDef.setMethod(GenerationMethod.EXTRACT); + authorDef.setDescription("Document author"); + updatedFields.put("author", authorDef); + + ContentFieldSchema updatedFieldSchema = new ContentFieldSchema(); + updatedFieldSchema.setName("enhanced_schema"); + updatedFieldSchema.setDescription("Enhanced document schema with author"); + updatedFieldSchema.setFields(updatedFields); + + Map updatedModels = new HashMap<>(); + updatedModels.put("completion", "gpt-4.1"); + updatedModels.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer updatedAnalyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Updated analyzer with enhanced schema") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true).setEnableFormula(true)) // Enable formula extraction + .setFieldSchema(updatedFieldSchema) + .setModels(updatedModels); + + // Update the analyzer using the convenience method + // This method accepts a ContentAnalyzer object directly instead of BinaryData + ContentAnalyzer result = contentUnderstandingClient.updateAnalyzer(analyzerId, updatedAnalyzer); + + System.out.println("Analyzer updated successfully!"); + System.out.println("New description: " + result.getDescription()); + // END:ContentUnderstandingUpdateAnalyzer + + // BEGIN:Assertion_ContentUnderstandingUpdateAnalyzer + assertNotNull(result, "Updated analyzer should not be null"); + assertEquals(analyzerId, result.getAnalyzerId(), "Analyzer ID should match"); + assertEquals("Updated analyzer with enhanced schema", result.getDescription(), "Description should be updated"); + 
System.out.println("Analyzer description verified"); + + // Verify field schema was updated + assertNotNull(result.getFieldSchema(), "Field schema should not be null"); + assertEquals("enhanced_schema", result.getFieldSchema().getName(), "Field schema name should be updated"); + assertEquals(2, result.getFieldSchema().getFields().size(), "Should have 2 fields after update"); + assertTrue(result.getFieldSchema().getFields().containsKey("title"), "Should still contain title field"); + assertTrue(result.getFieldSchema().getFields().containsKey("author"), "Should contain new author field"); + System.out.println("Field schema update verified: " + result.getFieldSchema().getFields().size() + " fields"); + + // Verify config was updated + assertNotNull(result.getConfig(), "Config should not be null"); + assertTrue(result.getConfig().isEnableFormula(), "EnableFormula should now be true"); + System.out.println("Config update verified"); + + System.out.println("All analyzer update properties validated successfully"); + // END:Assertion_ContentUnderstandingUpdateAnalyzer + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzerAsync.java new file mode 100644 index 000000000000..b934c52aea85 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample08_UpdateAnalyzerAsync.java @@ -0,0 +1,156 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Async sample demonstrating how to update an existing analyzer. + * This sample shows: + * 1. Creating an analyzer + * 2. Updating analyzer description + * 3. Updating analyzer configuration + * 4. 
Updating field schema + */ +public class Sample08_UpdateAnalyzerAsync extends ContentUnderstandingClientTestBase { + + private String analyzerId; + + @BeforeEach + public void setup() { + // Create an analyzer for testing + analyzerId = testResourceNamer.randomName("update_test_analyzer_", 50); + + Map fields = new HashMap<>(); + ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + fields.put("title", titleDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("basic_schema"); + fieldSchema.setDescription("Basic document schema"); + fieldSchema.setFields(fields); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Original analyzer for update testing") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, analyzer).last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); + System.out.println("Test analyzer created: " + analyzerId); + } + + @AfterEach + public void cleanup() { + if (analyzerId != null) { + try { + contentUnderstandingAsyncClient.deleteAnalyzer(analyzerId).block(); + System.out.println("Test analyzer deleted: " + analyzerId); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } + + @Test + public void testUpdateAnalyzerAsync() { + // BEGIN:ContentUnderstandingUpdateAnalyzerAsync + // Get the current analyzer + 
ContentAnalyzer currentAnalyzer = contentUnderstandingAsyncClient.getAnalyzer(analyzerId).block(); + System.out.println("Current description: " + currentAnalyzer.getDescription()); + + // Update the analyzer with new configuration + Map updatedFields = new HashMap<>(); + + // Keep the original field + ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + updatedFields.put("title", titleDef); + + // Add a new field + ContentFieldDefinition authorDef = new ContentFieldDefinition(); + authorDef.setType(ContentFieldType.STRING); + authorDef.setMethod(GenerationMethod.EXTRACT); + authorDef.setDescription("Document author"); + updatedFields.put("author", authorDef); + + ContentFieldSchema updatedFieldSchema = new ContentFieldSchema(); + updatedFieldSchema.setName("enhanced_schema"); + updatedFieldSchema.setDescription("Enhanced document schema with author"); + updatedFieldSchema.setFields(updatedFields); + + Map updatedModels = new HashMap<>(); + updatedModels.put("completion", "gpt-4.1"); + updatedModels.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer updatedAnalyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Updated analyzer with enhanced schema") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true).setEnableFormula(true)) // Enable formula extraction + .setFieldSchema(updatedFieldSchema) + .setModels(updatedModels); + + // Update the analyzer using the convenience method + // This method accepts a ContentAnalyzer object directly instead of BinaryData + ContentAnalyzer result = contentUnderstandingAsyncClient.updateAnalyzer(analyzerId, updatedAnalyzer).block(); + + System.out.println("Analyzer updated successfully!"); + System.out.println("New description: " + result.getDescription()); + // END:ContentUnderstandingUpdateAnalyzerAsync + + 
// BEGIN:Assertion_ContentUnderstandingUpdateAnalyzerAsync + assertNotNull(result, "Updated analyzer should not be null"); + assertEquals(analyzerId, result.getAnalyzerId(), "Analyzer ID should match"); + assertEquals("Updated analyzer with enhanced schema", result.getDescription(), "Description should be updated"); + System.out.println("Analyzer description verified"); + + // Verify field schema was updated + assertNotNull(result.getFieldSchema(), "Field schema should not be null"); + assertEquals("enhanced_schema", result.getFieldSchema().getName(), "Field schema name should be updated"); + assertEquals(2, result.getFieldSchema().getFields().size(), "Should have 2 fields after update"); + assertTrue(result.getFieldSchema().getFields().containsKey("title"), "Should still contain title field"); + assertTrue(result.getFieldSchema().getFields().containsKey("author"), "Should contain new author field"); + System.out.println("Field schema update verified: " + result.getFieldSchema().getFields().size() + " fields"); + + // Verify config was updated + assertNotNull(result.getConfig(), "Config should not be null"); + assertTrue(result.getConfig().isEnableFormula(), "EnableFormula should now be true"); + System.out.println("Config update verified"); + + System.out.println("All analyzer update properties validated successfully"); + // END:Assertion_ContentUnderstandingUpdateAnalyzerAsync + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzer.java new file mode 100644 index 000000000000..0573fb54613e --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzer.java @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.exception.ResourceNotFoundException; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Sample demonstrating how to delete an analyzer. + * This sample shows: + * 1. Creating a temporary analyzer + * 2. Verifying the analyzer exists + * 3. Deleting the analyzer + * 4. Verifying the analyzer no longer exists + */ +public class Sample09_DeleteAnalyzer extends ContentUnderstandingClientTestBase { + + @Test + public void testDeleteAnalyzer() { + + // BEGIN:ContentUnderstandingDeleteAnalyzer + // First, create a temporary analyzer to delete + String analyzerId = testResourceNamer.randomName("analyzer_to_delete_", 50); + + Map fields = new HashMap<>(); + ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + fields.put("title", titleDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("temp_schema"); + fieldSchema.setDescription("Temporary schema for deletion demo"); + fieldSchema.setFields(fields); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new 
ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Temporary analyzer for deletion demo") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + contentUnderstandingClient.beginCreateAnalyzer(analyzerId, analyzer).getFinalResult(); + System.out.println("Temporary analyzer created: " + analyzerId); + + // Verify the analyzer exists + ContentAnalyzer retrievedAnalyzer = contentUnderstandingClient.getAnalyzer(analyzerId); + System.out.println("Verified analyzer exists with ID: " + retrievedAnalyzer.getAnalyzerId()); + + // Delete the analyzer + contentUnderstandingClient.deleteAnalyzer(analyzerId); + System.out.println("Analyzer deleted successfully: " + analyzerId); + + // Verify the analyzer no longer exists + boolean analyzerDeleted = false; + try { + contentUnderstandingClient.getAnalyzer(analyzerId); + } catch (ResourceNotFoundException e) { + analyzerDeleted = true; + System.out.println("Confirmed: Analyzer no longer exists"); + } + // END:ContentUnderstandingDeleteAnalyzer + + // BEGIN:Assertion_ContentUnderstandingDeleteAnalyzer + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + System.out.println("Analyzer ID verified: " + analyzerId); + + assertNotNull(retrievedAnalyzer, "Retrieved analyzer should not be null before deletion"); + assertEquals(analyzerId, retrievedAnalyzer.getAnalyzerId(), "Retrieved analyzer ID should match"); + System.out.println("Analyzer existence verified before deletion"); + + assertTrue(analyzerDeleted, "Analyzer should be deleted and not retrievable"); + System.out.println("Analyzer deletion verified"); + + System.out.println("All analyzer deletion properties validated successfully"); + // END:Assertion_ContentUnderstandingDeleteAnalyzer + } + + @Test + public void testDeleteNonexistentAnalyzer() { + // Try to delete a non-existent 
analyzer + String nonExistentId = testResourceNamer.randomName("non_existent_analyzer_", 50); + + System.out.println("\nAttempting to delete non-existent analyzer: " + nonExistentId); + + // Note: The SDK allows deleting non-existent analyzers without throwing an exception + // This is a valid behavior (idempotent delete operation) + try { + contentUnderstandingClient.deleteAnalyzer(nonExistentId); + System.out.println("Delete operation completed (idempotent - no error for non-existent resource)"); + } catch (ResourceNotFoundException e) { + System.out.println("ResourceNotFoundException caught (alternative behavior): " + e.getMessage()); + } + + System.out.println("Non-existent analyzer deletion behavior verified (SDK allows idempotent deletes)"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzerAsync.java new file mode 100644 index 000000000000..0fc14f614d6c --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample09_DeleteAnalyzerAsync.java @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.exception.ResourceNotFoundException; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * Async sample demonstrating how to delete an analyzer. + * This sample shows: + * 1. Creating a temporary analyzer + * 2. Verifying the analyzer exists + * 3. Deleting the analyzer + * 4. Verifying the analyzer no longer exists + */ +public class Sample09_DeleteAnalyzerAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testDeleteAnalyzerAsync() { + + // BEGIN:ContentUnderstandingDeleteAnalyzerAsync + // First, create a temporary analyzer to delete + String analyzerId = testResourceNamer.randomName("analyzer_to_delete_", 50); + + Map fields = new HashMap<>(); + ContentFieldDefinition titleDef = new ContentFieldDefinition(); + titleDef.setType(ContentFieldType.STRING); + titleDef.setMethod(GenerationMethod.EXTRACT); + titleDef.setDescription("Document title"); + fields.put("title", titleDef); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("temp_schema"); + fieldSchema.setDescription("Temporary schema for deletion demo"); + fieldSchema.setFields(fields); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Temporary 
analyzer for deletion demo") + .setConfig(new ContentAnalyzerConfig().setEnableOcr(true).setEnableLayout(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, analyzer).last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); + System.out.println("Temporary analyzer created: " + analyzerId); + + // Verify the analyzer exists + ContentAnalyzer retrievedAnalyzer = contentUnderstandingAsyncClient.getAnalyzer(analyzerId).block(); + System.out.println("Verified analyzer exists with ID: " + retrievedAnalyzer.getAnalyzerId()); + + // Delete the analyzer + contentUnderstandingAsyncClient.deleteAnalyzer(analyzerId).block(); + System.out.println("Analyzer deleted successfully: " + analyzerId); + + // Verify the analyzer no longer exists + boolean analyzerDeleted = false; + try { + contentUnderstandingAsyncClient.getAnalyzer(analyzerId).block(); + } catch (ResourceNotFoundException e) { + analyzerDeleted = true; + System.out.println("Confirmed: Analyzer no longer exists"); + } + // END:ContentUnderstandingDeleteAnalyzerAsync + + // BEGIN:Assertion_ContentUnderstandingDeleteAnalyzerAsync + assertNotNull(analyzerId, "Analyzer ID should not be null"); + assertFalse(analyzerId.trim().isEmpty(), "Analyzer ID should not be empty"); + System.out.println("Analyzer ID verified: " + analyzerId); + + assertNotNull(retrievedAnalyzer, "Retrieved analyzer should not be null before deletion"); + assertEquals(analyzerId, retrievedAnalyzer.getAnalyzerId(), "Retrieved analyzer ID should match"); + System.out.println("Analyzer existence verified before deletion"); + + assertTrue(analyzerDeleted, "Analyzer should be deleted and not retrievable"); + System.out.println("Analyzer deletion verified"); + + 
System.out.println("All analyzer deletion properties validated successfully"); + // END:Assertion_ContentUnderstandingDeleteAnalyzerAsync + } + + @Test + public void testDeleteNonexistentAnalyzerAsync() { + // Try to delete a non-existent analyzer + String nonExistentId = testResourceNamer.randomName("non_existent_analyzer_", 50); + + System.out.println("\nAttempting to delete non-existent analyzer: " + nonExistentId); + + // Note: The SDK allows deleting non-existent analyzers without throwing an exception + // This is a valid behavior (idempotent delete operation) + try { + contentUnderstandingAsyncClient.deleteAnalyzer(nonExistentId).block(); + System.out.println("Delete operation completed (idempotent - no error for non-existent resource)"); + } catch (ResourceNotFoundException e) { + System.out.println("ResourceNotFoundException caught (alternative behavior): " + e.getMessage()); + } + + System.out.println("Non-existent analyzer deletion behavior verified (SDK allows idempotent deletes)"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigs.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigs.java new file mode 100644 index 000000000000..acdb1fc50b14 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigs.java @@ -0,0 +1,185 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentAnnotation; +import com.azure.ai.contentunderstanding.models.DocumentChartFigure; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentFormula; +import com.azure.ai.contentunderstanding.models.DocumentHyperlink; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Sample demonstrating how to analyze documents with advanced configs using prebuilt-documentSearch. + * This sample shows: + * 1. Using prebuilt-documentSearch analyzer which has formulas, layout, and OCR enabled + * 2. Extracting charts from documents + * 3. Extracting hyperlinks from documents + * 4. Extracting formulas from document pages + * 5. 
Extracting annotations from documents + */ +public class Sample10_AnalyzeConfigs extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeConfigs() throws IOException { + + // BEGIN:ContentUnderstandingAnalyzeWithConfigs + // Load local sample file + Path filePath = Paths.get("src/test/resources/sample_document_features.pdf"); + byte[] fileBytes = Files.readAllBytes(filePath); + BinaryData binaryData = BinaryData.fromBytes(fileBytes); + + System.out.println("Analyzing " + filePath + " with prebuilt-documentSearch..."); + System.out.println("Note: prebuilt-documentSearch has formulas, layout, and OCR enabled by default."); + + // Analyze with prebuilt-documentSearch which has formulas, layout, and OCR enabled + // These configs enable extraction of charts, annotations, hyperlinks, and formulas + SyncPoller operation + = contentUnderstandingClient.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + AnalyzeResult result = operation.getFinalResult(); + // END:ContentUnderstandingAnalyzeWithConfigs + + // BEGIN:Assertion_ContentUnderstandingAnalyzeWithConfigs + assertNotNull(operation, "Analysis operation should not be null"); + assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed"); + System.out.println("Analysis operation properties verified"); + + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // Verify document content type + DocumentContent firstDocContent = result.getContents().get(0) instanceof DocumentContent + ? 
(DocumentContent) result.getContents().get(0) + : null; + assertNotNull(firstDocContent, "Content should be DocumentContent"); + assertTrue(firstDocContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(firstDocContent.getEndPageNumber() >= firstDocContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = firstDocContent.getEndPageNumber() - firstDocContent.getStartPageNumber() + 1; + System.out.println("Document has " + totalPages + " page(s) from " + firstDocContent.getStartPageNumber() + + " to " + firstDocContent.getEndPageNumber()); + System.out.println("Document features analysis with configs completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeWithConfigs + + // BEGIN:ContentUnderstandingExtractCharts + // Extract charts from document content + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) result.getContents().get(0); + + if (documentContent.getFigures() != null && !documentContent.getFigures().isEmpty()) { + List chartFigures = documentContent.getFigures() + .stream() + .filter(f -> f instanceof DocumentChartFigure) + .map(f -> (DocumentChartFigure) f) + .collect(Collectors.toList()); + + System.out.println("Found " + chartFigures.size() + " chart(s)"); + for (DocumentChartFigure chart : chartFigures) { + System.out.println(" Chart ID: " + chart.getId()); + if (chart.getDescription() != null && !chart.getDescription().isEmpty()) { + System.out.println(" Description: " + chart.getDescription()); + } + if (chart.getCaption() != null + && chart.getCaption().getContent() != null + && !chart.getCaption().getContent().isEmpty()) { + System.out.println(" Caption: " + chart.getCaption().getContent()); + } + } + } + } + // END:ContentUnderstandingExtractCharts + + // BEGIN:ContentUnderstandingExtractHyperlinks + // Extract hyperlinks from document content + if (result.getContents().get(0) instanceof DocumentContent) { + 
DocumentContent docContent = (DocumentContent) result.getContents().get(0); + + if (docContent.getHyperlinks() != null && !docContent.getHyperlinks().isEmpty()) { + System.out.println("\nFound " + docContent.getHyperlinks().size() + " hyperlink(s)"); + for (DocumentHyperlink hyperlink : docContent.getHyperlinks()) { + System.out + .println(" URL: " + (hyperlink.getUrl() != null ? hyperlink.getUrl() : "(not available)")); + System.out.println(" Content: " + + (hyperlink.getContent() != null ? hyperlink.getContent() : "(not available)")); + } + } + } + // END:ContentUnderstandingExtractHyperlinks + + // BEGIN:ContentUnderstandingExtractFormulas + // Extract formulas from document pages + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) result.getContents().get(0); + + if (content.getPages() != null) { + int formulaCount = 0; + for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) { + if (page.getFormulas() != null) { + formulaCount += page.getFormulas().size(); + } + } + + if (formulaCount > 0) { + System.out.println("\nFound " + formulaCount + " formula(s)"); + for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) { + if (page.getFormulas() != null) { + for (DocumentFormula formula : page.getFormulas()) { + System.out.println(" Formula Kind: " + formula.getKind()); + System.out.println(" LaTeX: " + + (formula.getValue() != null ? 
formula.getValue() : "(not available)")); + if (formula.getConfidence() != null) { + System.out.println(String.format(" Confidence: %.2f", formula.getConfidence())); + } + } + } + } + } + } + } + // END:ContentUnderstandingExtractFormulas + + // BEGIN:ContentUnderstandingExtractAnnotations + // Extract annotations from document content + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent document = (DocumentContent) result.getContents().get(0); + + if (document.getAnnotations() != null && !document.getAnnotations().isEmpty()) { + System.out.println("\nFound " + document.getAnnotations().size() + " annotation(s)"); + for (DocumentAnnotation annotation : document.getAnnotations()) { + System.out.println(" Annotation ID: " + annotation.getId()); + System.out.println(" Kind: " + annotation.getKind()); + if (annotation.getAuthor() != null && !annotation.getAuthor().isEmpty()) { + System.out.println(" Author: " + annotation.getAuthor()); + } + if (annotation.getComments() != null && !annotation.getComments().isEmpty()) { + System.out.println(" Comments: " + annotation.getComments().size()); + for (com.azure.ai.contentunderstanding.models.DocumentAnnotationComment comment : annotation + .getComments()) { + System.out.println(" - " + comment.getMessage()); + } + } + } + } + } + // END:ContentUnderstandingExtractAnnotations + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigsAsync.java new file mode 100644 index 000000000000..77efeab563e4 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample10_AnalyzeConfigsAsync.java @@ -0,0 +1,193 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.DocumentAnnotation; +import com.azure.ai.contentunderstanding.models.DocumentChartFigure; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.DocumentFormula; +import com.azure.ai.contentunderstanding.models.DocumentHyperlink; +import com.azure.core.util.BinaryData; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Async sample demonstrating how to analyze documents with advanced configs using prebuilt-documentSearch. + * This sample shows: + * 1. Using prebuilt-documentSearch analyzer which has formulas, layout, and OCR enabled + * 2. Extracting charts from documents + * 3. Extracting hyperlinks from documents + * 4. Extracting formulas from document pages + * 5. 
Extracting annotations from documents + */ +public class Sample10_AnalyzeConfigsAsync extends ContentUnderstandingClientTestBase { + + @Test + public void testAnalyzeConfigsAsync() throws IOException { + + // BEGIN:ContentUnderstandingAnalyzeWithConfigsAsync + // Load local sample file + Path filePath = Paths.get("src/test/resources/sample_document_features.pdf"); + byte[] fileBytes = Files.readAllBytes(filePath); + BinaryData binaryData = BinaryData.fromBytes(fileBytes); + + System.out.println("Analyzing " + filePath + " with prebuilt-documentSearch..."); + System.out.println("Note: prebuilt-documentSearch has formulas, layout, and OCR enabled by default."); + + // Analyze with prebuilt-documentSearch which has formulas, layout, and OCR enabled + // These configs enable extraction of charts, annotations, hyperlinks, and formulas + PollerFlux operation + = contentUnderstandingAsyncClient.beginAnalyzeBinary("prebuilt-documentSearch", binaryData); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + AnalyzeResult result = operation.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + // END:ContentUnderstandingAnalyzeWithConfigsAsync + + // BEGIN:Assertion_ContentUnderstandingAnalyzeWithConfigsAsync + assertNotNull(operation, "Analysis operation should not be null"); + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "PDF file should have exactly one content element"); + 
System.out.println("Analysis operation properties verified"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // Verify document content type + DocumentContent firstDocContent = result.getContents().get(0) instanceof DocumentContent + ? (DocumentContent) result.getContents().get(0) + : null; + assertNotNull(firstDocContent, "Content should be DocumentContent"); + assertTrue(firstDocContent.getStartPageNumber() >= 1, "Start page should be >= 1"); + assertTrue(firstDocContent.getEndPageNumber() >= firstDocContent.getStartPageNumber(), + "End page should be >= start page"); + int totalPages = firstDocContent.getEndPageNumber() - firstDocContent.getStartPageNumber() + 1; + System.out.println("Document has " + totalPages + " page(s) from " + firstDocContent.getStartPageNumber() + + " to " + firstDocContent.getEndPageNumber()); + System.out.println("Document features analysis with configs completed successfully"); + // END:Assertion_ContentUnderstandingAnalyzeWithConfigsAsync + + // BEGIN:ContentUnderstandingExtractChartsAsync + // Extract charts from document content + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent documentContent = (DocumentContent) result.getContents().get(0); + + if (documentContent.getFigures() != null && !documentContent.getFigures().isEmpty()) { + List chartFigures = documentContent.getFigures() + .stream() + .filter(f -> f instanceof DocumentChartFigure) + .map(f -> (DocumentChartFigure) f) + .collect(Collectors.toList()); + + System.out.println("Found " + chartFigures.size() + " chart(s)"); + for (DocumentChartFigure chart : chartFigures) { + System.out.println(" Chart ID: " + chart.getId()); + if (chart.getDescription() != null && !chart.getDescription().isEmpty()) { + System.out.println(" Description: " + chart.getDescription()); + } + if (chart.getCaption() != null + && chart.getCaption().getContent() != null + && !chart.getCaption().getContent().isEmpty()) 
{ + System.out.println(" Caption: " + chart.getCaption().getContent()); + } + } + } + } + // END:ContentUnderstandingExtractChartsAsync + + // BEGIN:ContentUnderstandingExtractHyperlinksAsync + // Extract hyperlinks from document content + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) result.getContents().get(0); + + if (docContent.getHyperlinks() != null && !docContent.getHyperlinks().isEmpty()) { + System.out.println("\nFound " + docContent.getHyperlinks().size() + " hyperlink(s)"); + for (DocumentHyperlink hyperlink : docContent.getHyperlinks()) { + System.out + .println(" URL: " + (hyperlink.getUrl() != null ? hyperlink.getUrl() : "(not available)")); + System.out.println(" Content: " + + (hyperlink.getContent() != null ? hyperlink.getContent() : "(not available)")); + } + } + } + // END:ContentUnderstandingExtractHyperlinksAsync + + // BEGIN:ContentUnderstandingExtractFormulasAsync + // Extract formulas from document pages + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent content = (DocumentContent) result.getContents().get(0); + + if (content.getPages() != null) { + int formulaCount = 0; + for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) { + if (page.getFormulas() != null) { + formulaCount += page.getFormulas().size(); + } + } + + if (formulaCount > 0) { + System.out.println("\nFound " + formulaCount + " formula(s)"); + for (com.azure.ai.contentunderstanding.models.DocumentPage page : content.getPages()) { + if (page.getFormulas() != null) { + for (DocumentFormula formula : page.getFormulas()) { + System.out.println(" Formula Kind: " + formula.getKind()); + System.out.println(" LaTeX: " + + (formula.getValue() != null ? 
formula.getValue() : "(not available)")); + if (formula.getConfidence() != null) { + System.out.println(String.format(" Confidence: %.2f", formula.getConfidence())); + } + } + } + } + } + } + } + // END:ContentUnderstandingExtractFormulasAsync + + // BEGIN:ContentUnderstandingExtractAnnotationsAsync + // Extract annotations from document content + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent document = (DocumentContent) result.getContents().get(0); + + if (document.getAnnotations() != null && !document.getAnnotations().isEmpty()) { + System.out.println("\nFound " + document.getAnnotations().size() + " annotation(s)"); + for (DocumentAnnotation annotation : document.getAnnotations()) { + System.out.println(" Annotation ID: " + annotation.getId()); + System.out.println(" Kind: " + annotation.getKind()); + if (annotation.getAuthor() != null && !annotation.getAuthor().isEmpty()) { + System.out.println(" Author: " + annotation.getAuthor()); + } + if (annotation.getComments() != null && !annotation.getComments().isEmpty()) { + System.out.println(" Comments: " + annotation.getComments().size()); + for (com.azure.ai.contentunderstanding.models.DocumentAnnotationComment comment : annotation + .getComments()) { + System.out.println(" - " + comment.getMessage()); + } + } + } + } + } + // END:ContentUnderstandingExtractAnnotationsAsync + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample11_AnalyzeReturnRawJson.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample11_AnalyzeReturnRawJson.java new file mode 100644 index 000000000000..6d68ae095d3b --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample11_AnalyzeReturnRawJson.java @@ -0,0 +1,155 @@ +// Copyright (c) Microsoft Corporation. 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.core.http.rest.RequestOptions;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.SyncPoller;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Base64;

/**
 * Sample demonstrating how to analyze documents and get a raw JSON response using protocol methods.
 * This sample shows:
 * 1. Using a protocol method to get the raw JSON response instead of strongly-typed objects
 * 2. Parsing the raw JSON response
 * 3. Pretty-printing and saving the JSON to a file
 *
 * Note: For production use, prefer the object model approach (the typed {@code beginAnalyze}
 * overloads), which returns AnalyzeResult objects that are easier to work with.
 */
public class Sample11_AnalyzeReturnRawJson extends ContentUnderstandingClientTestBase {

    /**
     * Analyzes a local PDF through the protocol (BinaryData) overload, validates the raw JSON
     * response, then pretty-prints and saves it under {@code target/sample_output}.
     *
     * @throws IOException if the sample file cannot be read or the output file cannot be written.
     */
    @Test
    public void testAnalyzeReturnRawJson() throws IOException {

        // BEGIN:ContentUnderstandingAnalyzeReturnRawJson
        // Load local test file
        Path filePath = Paths.get("src/test/resources/sample_invoice.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);

        // Prepare request body with binary data using JSON format
        // Note: The API expects a JSON request with "inputs" array containing document data
        String base64Data = Base64.getEncoder().encodeToString(fileBytes);
        String requestJson = String.format("{\"inputs\": [{\"data\": \"%s\"}]}", base64Data);
        BinaryData requestBody = BinaryData.fromString(requestJson);

        // Use the protocol method to get the raw JSON response. Protocol long-running operations
        // surface both poll responses and the final result as BinaryData.
        // Note: For production use, prefer the typed beginAnalyze overloads, which return
        // AnalyzeResult objects that are easier to work with.
        SyncPoller<BinaryData, BinaryData> operation
            = contentUnderstandingClient.beginAnalyze("prebuilt-documentSearch", requestBody, new RequestOptions());

        BinaryData responseData = operation.getFinalResult();
        // END:ContentUnderstandingAnalyzeReturnRawJson

        // BEGIN:Assertion_ContentUnderstandingAnalyzeReturnRawJson
        assertTrue(Files.exists(filePath), "Sample file should exist at " + filePath);
        assertTrue(fileBytes.length > 0, "File should not be empty");
        System.out.println("File loaded: " + filePath + " (" + String.format("%,d", fileBytes.length) + " bytes)");

        assertNotNull(operation, "Analysis operation should not be null");
        // getFinalResult() above already drove the poller to a terminal state; waitForCompletion()
        // and poll() return the cached terminal response here.
        assertTrue(operation.waitForCompletion().getStatus().isComplete(), "Operation should be completed");
        System.out.println("Analysis operation completed with status: " + operation.poll().getStatus());

        assertNotNull(responseData, "Response data should not be null");
        assertTrue(responseData.toBytes().length > 0, "Response data should not be empty");
        System.out.println("Response data size: " + String.format("%,d", responseData.toBytes().length) + " bytes");

        // Verify response data can be converted to string
        String responseString = responseData.toString();
        assertNotNull(responseString, "Response string should not be null");
        assertTrue(responseString.length() > 0, "Response string should not be empty");
        System.out.println("Response string length: " + String.format("%,d", responseString.length()) + " characters");

        // Verify response is valid JSON format
        try {
            ObjectMapper mapper = new ObjectMapper();
            JsonNode jsonNode = mapper.readTree(responseData.toBytes());
            assertNotNull(jsonNode, "Response should be valid JSON");
            System.out.println("Response is valid JSON format");
        } catch (Exception ex) {
            fail("Response data is not valid JSON: " + ex.getMessage());
        }

        System.out.println("Raw JSON analysis operation completed successfully");
        // END:Assertion_ContentUnderstandingAnalyzeReturnRawJson

        // BEGIN:ContentUnderstandingParseRawJson
        // Parse the raw JSON response
        ObjectMapper mapper = new ObjectMapper();
        JsonNode jsonNode = mapper.readTree(responseData.toBytes());

        // Pretty-print the JSON
        String prettyJson = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);

        // Create output directory if it doesn't exist
        Path outputDir = Paths.get("target/sample_output");
        Files.createDirectories(outputDir);

        // Save to file (UTF-8, so byte size may differ from String.length())
        String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"));
        String outputFileName = "analyze_result_" + timestamp + ".json";
        Path outputPath = outputDir.resolve(outputFileName);
        Files.write(outputPath, prettyJson.getBytes(StandardCharsets.UTF_8));

        System.out.println("Raw JSON response saved to: " + outputPath);
        System.out.println("File size: " + String.format("%,d", prettyJson.length()) + " characters");
        // END:ContentUnderstandingParseRawJson

        // BEGIN:Assertion_ContentUnderstandingParseRawJson
        assertNotNull(jsonNode, "JSON node should not be null");
        System.out.println("JSON document parsed successfully");

        assertNotNull(prettyJson, "Pretty JSON string should not be null");
        assertTrue(prettyJson.length() > 0, "Pretty JSON should not be empty");
        // The service returns compact JSON, so pretty-printing only adds whitespace.
        assertTrue(prettyJson.length() >= responseData.toString().length(),
            "Pretty JSON should be same size or larger than original (due to indentation)");
        System.out.println("Pretty JSON generated: " + String.format("%,d", prettyJson.length()) + " characters");

        // Verify JSON is properly indented
        assertTrue(prettyJson.contains("\n"), "Pretty JSON should contain line breaks");
        assertTrue(prettyJson.contains("  "), "Pretty JSON should contain indentation");
        System.out.println("JSON is properly formatted with indentation");

        // Verify output directory
        assertNotNull(outputDir, "Output directory path should not be null");
        assertTrue(Files.exists(outputDir), "Output directory should exist at " + outputDir);
        System.out.println("Output directory verified: " + outputDir);

        // Verify output file name format
        assertNotNull(outputFileName, "Output file name should not be null");
        assertTrue(outputFileName.startsWith("analyze_result_"),
            "Output file name should start with 'analyze_result_'");
        assertTrue(outputFileName.endsWith(".json"), "Output file name should end with '.json'");
        System.out.println("Output file name: " + outputFileName);

        // Verify output file path
        assertNotNull(outputPath, "Output file path should not be null");
        assertTrue(outputPath.toString().contains(outputDir.toString()), "Output path should contain output directory");
        assertTrue(outputPath.toString().endsWith(".json"), "Output path should end with '.json'");
        assertTrue(Files.exists(outputPath), "Output file should exist at " + outputPath);
        System.out.println("Output file created: " + outputPath);

        // Verify file content size.
        // Compare byte counts, not char count vs byte count: the file is written as UTF-8, so
        // its size equals the UTF-8 encoding of the pretty JSON, which can differ from
        // String.length() when non-ASCII characters are present.
        long fileSize = Files.size(outputPath);
        assertTrue(fileSize > 0, "Output file should not be empty");
        assertEquals(prettyJson.getBytes(StandardCharsets.UTF_8).length, fileSize,
            "File size should match the UTF-8 byte length of the pretty JSON");
        System.out.println("Output file size verified: " + String.format("%,d", fileSize) + " bytes");

        System.out.println("Raw JSON parsing and saving completed successfully");
        // END:Assertion_ContentUnderstandingParseRawJson
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.core.http.rest.RequestOptions;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.PollerFlux;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Mono;

import static org.junit.jupiter.api.Assertions.*;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Base64;

/**
 * Async sample demonstrating how to analyze documents and get a raw JSON response using protocol methods.
 * This sample shows:
 * 1. Using a protocol method to get the raw JSON response instead of strongly-typed objects
 * 2. Parsing the raw JSON response
 * 3. Pretty-printing and saving the JSON to a file
 *
 * Note: For production use, prefer the object model approach (the typed {@code beginAnalyze}
 * overloads), which returns AnalyzeResult objects that are easier to work with.
 */
public class Sample11_AnalyzeReturnRawJsonAsync extends ContentUnderstandingClientTestBase {

    /**
     * Async counterpart of {@code testAnalyzeReturnRawJson}: analyzes a local PDF through the
     * protocol (BinaryData) overload using reactive polling, validates the raw JSON response,
     * then pretty-prints and saves it under {@code target/sample_output}.
     *
     * @throws IOException if the sample file cannot be read or the output file cannot be written.
     */
    @Test
    public void testAnalyzeReturnRawJsonAsync() throws IOException {

        // BEGIN:ContentUnderstandingAnalyzeReturnRawJsonAsync
        // Load local test file
        Path filePath = Paths.get("src/test/resources/sample_invoice.pdf");
        byte[] fileBytes = Files.readAllBytes(filePath);

        // Prepare request body with binary data using JSON format
        // Note: The API expects a JSON request with "inputs" array containing document data
        String base64Data = Base64.getEncoder().encodeToString(fileBytes);
        String requestJson = String.format("{\"inputs\": [{\"data\": \"%s\"}]}", base64Data);
        BinaryData requestBody = BinaryData.fromString(requestJson);

        // Use the protocol method to get the raw JSON response. Protocol long-running operations
        // surface both poll responses and the final result as BinaryData.
        // Note: For production use, prefer the typed beginAnalyze overloads, which return
        // AnalyzeResult objects that are easier to work with.
        PollerFlux<BinaryData, BinaryData> operation = contentUnderstandingAsyncClient
            .beginAnalyze("prebuilt-documentSearch", requestBody, new RequestOptions());

        // Use reactive pattern: chain operations using flatMap
        // In a real application, you would use subscribe() instead of block()
        BinaryData responseData = operation.last().flatMap(pollResponse -> {
            if (pollResponse.getStatus().isComplete()) {
                return pollResponse.getFinalResult();
            } else {
                return Mono.error(
                    new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus()));
            }
        }).block(); // block() is used here for testing; in production, use subscribe()
        // END:ContentUnderstandingAnalyzeReturnRawJsonAsync

        // BEGIN:Assertion_ContentUnderstandingAnalyzeReturnRawJsonAsync
        assertTrue(Files.exists(filePath), "Sample file should exist at " + filePath);
        assertTrue(fileBytes.length > 0, "File should not be empty");
        System.out.println("File loaded: " + filePath + " (" + String.format("%,d", fileBytes.length) + " bytes)");

        assertNotNull(operation, "Analysis operation should not be null");
        assertNotNull(responseData, "Response data should not be null");
        assertTrue(responseData.toBytes().length > 0, "Response data should not be empty");
        System.out.println("Response data size: " + String.format("%,d", responseData.toBytes().length) + " bytes");

        // Verify response data can be converted to string
        String responseString = responseData.toString();
        assertNotNull(responseString, "Response string should not be null");
        assertTrue(responseString.length() > 0, "Response string should not be empty");
        System.out.println("Response string length: " + String.format("%,d", responseString.length()) + " characters");

        // Verify response is valid JSON format
        try {
            ObjectMapper mapper = new ObjectMapper();
            JsonNode jsonNode = mapper.readTree(responseData.toBytes());
            assertNotNull(jsonNode, "Response should be valid JSON");
            System.out.println("Response is valid JSON format");
        } catch (Exception ex) {
            fail("Response data is not valid JSON: " + ex.getMessage());
        }

        System.out.println("Raw JSON analysis operation completed successfully");
        // END:Assertion_ContentUnderstandingAnalyzeReturnRawJsonAsync

        // BEGIN:ContentUnderstandingParseRawJsonAsync
        // Parse the raw JSON response
        ObjectMapper mapper = new ObjectMapper();
        JsonNode jsonNode = mapper.readTree(responseData.toBytes());

        // Pretty-print the JSON
        String prettyJson = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);

        // Create output directory if it doesn't exist
        Path outputDir = Paths.get("target/sample_output");
        Files.createDirectories(outputDir);

        // Save to file (UTF-8, so byte size may differ from String.length())
        String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"));
        String outputFileName = "analyze_result_" + timestamp + ".json";
        Path outputPath = outputDir.resolve(outputFileName);
        Files.write(outputPath, prettyJson.getBytes(StandardCharsets.UTF_8));

        System.out.println("Raw JSON response saved to: " + outputPath);
        System.out.println("File size: " + String.format("%,d", prettyJson.length()) + " characters");
        // END:ContentUnderstandingParseRawJsonAsync

        // BEGIN:Assertion_ContentUnderstandingParseRawJsonAsync
        assertNotNull(jsonNode, "JSON node should not be null");
        System.out.println("JSON document parsed successfully");

        assertNotNull(prettyJson, "Pretty JSON string should not be null");
        assertTrue(prettyJson.length() > 0, "Pretty JSON should not be empty");
        // The service returns compact JSON, so pretty-printing only adds whitespace.
        assertTrue(prettyJson.length() >= responseData.toString().length(),
            "Pretty JSON should be same size or larger than original (due to indentation)");
        System.out.println("Pretty JSON generated: " + String.format("%,d", prettyJson.length()) + " characters");

        // Verify JSON is properly indented
        assertTrue(prettyJson.contains("\n"), "Pretty JSON should contain line breaks");
        assertTrue(prettyJson.contains("  "), "Pretty JSON should contain indentation");
        System.out.println("JSON is properly formatted with indentation");

        // Verify output directory
        assertNotNull(outputDir, "Output directory path should not be null");
        assertTrue(Files.exists(outputDir), "Output directory should exist at " + outputDir);
        System.out.println("Output directory verified: " + outputDir);

        // Verify output file name format
        assertNotNull(outputFileName, "Output file name should not be null");
        assertTrue(outputFileName.startsWith("analyze_result_"),
            "Output file name should start with 'analyze_result_'");
        assertTrue(outputFileName.endsWith(".json"), "Output file name should end with '.json'");
        System.out.println("Output file name: " + outputFileName);

        // Verify output file path
        assertNotNull(outputPath, "Output file path should not be null");
        assertTrue(outputPath.toString().contains(outputDir.toString()), "Output path should contain output directory");
        assertTrue(outputPath.toString().endsWith(".json"), "Output path should end with '.json'");
        assertTrue(Files.exists(outputPath), "Output file should exist at " + outputPath);
        System.out.println("Output file created: " + outputPath);

        // Verify file content size.
        // Compare byte counts, not char count vs byte count: the file is written as UTF-8, so
        // its size equals the UTF-8 encoding of the pretty JSON, which can differ from
        // String.length() when non-ASCII characters are present.
        long fileSize = Files.size(outputPath);
        assertTrue(fileSize > 0, "Output file should not be empty");
        assertEquals(prettyJson.getBytes(StandardCharsets.UTF_8).length, fileSize,
            "File size should match the UTF-8 byte length of the pretty JSON");
        System.out.println("Output file size verified: " + String.format("%,d", fileSize) + " bytes");

        System.out.println("Raw JSON parsing and saving completed successfully");
        // END:Assertion_ContentUnderstandingParseRawJsonAsync
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.ai.contentunderstanding.tests.samples;

import com.azure.ai.contentunderstanding.models.AnalyzeInput;
import com.azure.ai.contentunderstanding.models.AnalyzeResult;
import com.azure.ai.contentunderstanding.models.AudioVisualContent;
import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus;
import com.azure.ai.contentunderstanding.models.DocumentContent;
import com.azure.core.util.BinaryData;
import com.azure.core.util.polling.SyncPoller;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Sample demonstrates how to retrieve result files (like keyframe images) from video analysis operations.
 */
public class Sample12_GetResultFile extends ContentUnderstandingClientTestBase {

    /**
     * Synchronous sample for getting result files from a completed analysis operation.
     * <p>
     * Note: The Azure Content Understanding service requires extended time after analysis
     * completion for keyframe result files to become available. This test uses retry logic
     * to handle the delay.
     *
     * @throws IOException if the keyframe image cannot be written to or read back from disk.
     */
    @Test
    public void testGetResultFile() throws IOException {

        // BEGIN: com.azure.ai.contentunderstanding.getResultFile
        // For video analysis, use a video URL to get keyframes
        String videoUrl
            = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4";

        // Step 1: Start the video analysis operation
        AnalyzeInput input = new AnalyzeInput();
        input.setUrl(videoUrl);

        SyncPoller<ContentAnalyzerAnalyzeOperationStatus, AnalyzeResult> poller
            = contentUnderstandingClient.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input));

        System.out.println("Started analysis operation");

        // Wait for completion
        AnalyzeResult result = poller.getFinalResult();
        System.out.println("Analysis completed successfully!");

        // Get the operation ID from the polling result using the getOperationId() convenience method
        // The operation ID is extracted from the Operation-Location header and can be used with
        // getResultFile() and deleteResult() APIs
        String operationId = poller.poll().getValue().getOperationId();
        System.out.println("Operation ID: " + operationId);

        // END: com.azure.ai.contentunderstanding.getResultFile

        // Verify operation started and completed
        assertNotNull(videoUrl, "Video URL should not be null");
        System.out.println("Video URL: " + videoUrl);

        assertNotNull(operationId, "Operation ID should not be null");
        assertFalse(operationId.trim().isEmpty(), "Operation ID should not be empty");
        assertTrue(operationId.length() > 0, "Operation ID should have length > 0");
        assertFalse(operationId.contains(" "), "Operation ID should not contain spaces");
        System.out.println("Operation ID obtained: " + operationId);
        System.out.println("  Length: " + operationId.length() + " characters");

        // Verify result
        assertNotNull(result, "Analysis result should not be null");
        assertNotNull(result.getContents(), "Result should contain contents");
        assertTrue(result.getContents().size() > 0, "Result should have at least one content");
        System.out.println("Analysis result contains " + result.getContents().size() + " content(s)");

        // BEGIN: com.azure.ai.contentunderstanding.getResultFile.keyframes
        // Step 2: Get keyframes from video analysis result
        AudioVisualContent videoContent = null;
        for (Object content : result.getContents()) {
            if (content instanceof AudioVisualContent) {
                videoContent = (AudioVisualContent) content;
                break;
            }
        }

        if (videoContent != null
            && videoContent.getKeyFrameTimesMs() != null
            && !videoContent.getKeyFrameTimesMs().isEmpty()) {
            List<Long> keyFrameTimes = videoContent.getKeyFrameTimesMs();
            System.out.println("Total keyframes: " + keyFrameTimes.size());

            // Get the first keyframe
            long firstFrameTimeMs = keyFrameTimes.get(0);
            System.out.println("First keyframe time: " + firstFrameTimeMs + " ms");

            // Construct the keyframe path
            String framePath = "keyframes/" + firstFrameTimeMs;
            System.out.println("Getting result file: " + framePath);

            // Retrieve the keyframe image using convenience method with retry logic
            // Result files may not be immediately available after analysis completion
            BinaryData fileData = null;
            int maxRetries = 12;
            int retryDelayMs = 10000;
            for (int attempt = 1; attempt <= maxRetries; attempt++) {
                try {
                    fileData = contentUnderstandingClient.getResultFile(operationId, framePath);
                    break; // Success, exit retry loop
                } catch (Exception e) {
                    if (attempt == maxRetries) {
                        throw e; // Re-throw on final attempt
                    }
                    System.out.println("Attempt " + attempt + " failed: " + e.getMessage());
                    System.out.println("Waiting " + (retryDelayMs / 1000) + " seconds before retry...");
                    try {
                        Thread.sleep(retryDelayMs);
                    } catch (InterruptedException ie) {
                        // Restore the interrupt flag before propagating, per Java interruption policy
                        Thread.currentThread().interrupt();
                        throw new RuntimeException("Interrupted while waiting for retry", ie);
                    }
                }
            }
            byte[] imageBytes = fileData.toBytes();
            System.out.println("Retrieved keyframe image (" + String.format("%,d", imageBytes.length) + " bytes)");

            // Save the keyframe image
            Path outputDir = Paths.get("target", "sample_output");
            Files.createDirectories(outputDir);
            String outputFileName = "keyframe_" + firstFrameTimeMs + ".jpg";
            Path outputPath = outputDir.resolve(outputFileName);
            Files.write(outputPath, imageBytes);

            System.out.println("Keyframe image saved to: " + outputPath.toAbsolutePath());
            // END: com.azure.ai.contentunderstanding.getResultFile.keyframes

            // Verify video content
            assertNotNull(videoContent, "Video content should not be null");
            assertNotNull(keyFrameTimes, "KeyFrameTimesMs should not be null");
            assertTrue(keyFrameTimes.size() > 0, "Should have at least one keyframe");
            System.out.println("\n🎬 Keyframe Information:");
            System.out.println("Total keyframes: " + keyFrameTimes.size());

            // Verify keyframe times are valid
            for (long frameTime : keyFrameTimes) {
                assertTrue(frameTime >= 0, "Keyframe time should be non-negative, but was " + frameTime);
            }

            // Get keyframe statistics
            long lastFrameTimeMs = keyFrameTimes.get(keyFrameTimes.size() - 1);
            double avgFrameInterval = keyFrameTimes.size() > 1
                ? (double) (lastFrameTimeMs - firstFrameTimeMs) / (keyFrameTimes.size() - 1)
                : 0;

            assertTrue(firstFrameTimeMs >= 0, "First keyframe time should be >= 0");
            assertTrue(lastFrameTimeMs >= firstFrameTimeMs, "Last keyframe time should be >= first keyframe time");

            System.out.println("  First keyframe: " + firstFrameTimeMs + " ms ("
                + String.format("%.2f", firstFrameTimeMs / 1000.0) + " seconds)");
            System.out.println("  Last keyframe: " + lastFrameTimeMs + " ms ("
                + String.format("%.2f", lastFrameTimeMs / 1000.0) + " seconds)");
            if (keyFrameTimes.size() > 1) {
                System.out.println("  Average interval: " + String.format("%.2f", avgFrameInterval) + " ms");
            }

            // Verify file data
            System.out.println("\n📥 File Data Verification:");
            assertNotNull(fileData, "File data should not be null");

            // Verify image data
            System.out.println("\nVerifying image data...");
            assertNotNull(imageBytes, "Image bytes should not be null");
            assertTrue(imageBytes.length > 0, "Image should have content");
            assertTrue(imageBytes.length >= 100, "Image should have reasonable size (>= 100 bytes)");
            System.out.println("Image size: " + String.format("%,d", imageBytes.length) + " bytes ("
                + String.format("%.2f", imageBytes.length / 1024.0) + " KB)");

            // Verify image format
            String imageFormat = detectImageFormat(imageBytes);
            System.out.println("Detected image format: " + imageFormat);
            assertNotEquals("Unknown", imageFormat, "Image format should be recognized");

            // Verify saved file
            System.out.println("\n💾 Saved File Verification:");
            assertTrue(Files.exists(outputPath), "Saved file should exist");
            long fileSize = Files.size(outputPath);
            assertTrue(fileSize > 0, "Saved file should have content");
            assertEquals(imageBytes.length, fileSize, "Saved file size should match image size");
            System.out.println("File saved: " + outputPath.toAbsolutePath());
            System.out.println("File size verified: " + String.format("%,d", fileSize) + " bytes");

            // Verify file can be read back
            byte[] readBackBytes = Files.readAllBytes(outputPath);
            assertEquals(imageBytes.length, readBackBytes.length, "Read back file size should match original");
            System.out.println("File content verified (read back matches original)");

            // Test additional keyframes if available
            if (keyFrameTimes.size() > 1) {
                System.out
                    .println("\nTesting additional keyframes (" + (keyFrameTimes.size() - 1) + " more available)...");
                int middleIndex = keyFrameTimes.size() / 2;
                long middleFrameTimeMs = keyFrameTimes.get(middleIndex);
                String middleFramePath = "keyframes/" + middleFrameTimeMs;

                BinaryData middleFileData = contentUnderstandingClient.getResultFile(operationId, middleFramePath);
                assertNotNull(middleFileData, "Middle keyframe data should not be null");
                assertTrue(middleFileData.toBytes().length > 0, "Middle keyframe should have content");
                System.out.println(
                    "Successfully retrieved keyframe at index " + middleIndex + " (" + middleFrameTimeMs + " ms)");
                System.out.println("  Size: " + String.format("%,d", middleFileData.toBytes().length) + " bytes");
            }

            // Summary
            System.out.println("\n✅ Keyframe retrieval verification completed successfully:");
            System.out.println("  Operation ID: " + operationId);
            System.out.println("  Total keyframes: " + keyFrameTimes.size());
            System.out.println("  First keyframe time: " + firstFrameTimeMs + " ms");
            System.out.println("  Image format: " + imageFormat);
            System.out.println("  Image size: " + String.format("%,d", imageBytes.length) + " bytes");
            System.out.println("  Saved to: " + outputPath.toAbsolutePath());
            System.out.println("  File verified: Yes");
        } else {
            // No video content (expected for document analysis)
            System.out.println("\n📚 GetResultFile API Usage Example:");
            System.out.println("  For video analysis with keyframes:");
            System.out.println("  1. Analyze video with prebuilt-videoSearch");
            System.out.println("  2. Get keyframe times from AudioVisualContent.getKeyFrameTimesMs()");
            System.out.println("  3. Retrieve keyframes using getResultFile():");
            System.out.println("     BinaryData fileData = contentUnderstandingClient.getResultFile(\"" + operationId
                + "\", \"keyframes/1000\");");
            System.out.println("  4. Save or process the keyframe image");

            // Verify content type
            if (result.getContents().get(0) instanceof DocumentContent) {
                DocumentContent docContent = (DocumentContent) result.getContents().get(0);
                System.out.println("\nContent type: DocumentContent (as expected)");
                System.out.println("  MIME type: "
                    + (docContent.getMimeType() != null ? docContent.getMimeType() : "(not specified)"));
                System.out
                    .println("  Pages: " + docContent.getStartPageNumber() + " - " + docContent.getEndPageNumber());
            }

            assertNotNull(operationId, "Operation ID should be available for GetResultFile API");
            assertFalse(operationId.trim().isEmpty(), "Operation ID should not be empty");
            System.out.println("Operation ID available for GetResultFile API: " + operationId);
        }
    }

    /**
     * Detect image format from magic bytes (file signature), without decoding the image.
     *
     * @param imageBytes raw image bytes; may be shorter than any known signature.
     * @return "JPEG", "PNG", "GIF", "WebP", or "Unknown" when no signature matches.
     */
    private String detectImageFormat(byte[] imageBytes) {
        if (imageBytes.length < 2) {
            return "Unknown";
        }

        // Check JPEG magic bytes (FF D8)
        if (imageBytes[0] == (byte) 0xFF && imageBytes[1] == (byte) 0xD8) {
            return "JPEG";
        }

        // Check PNG magic bytes (89 50 4E 47)
        if (imageBytes.length >= 4
            && imageBytes[0] == (byte) 0x89
            && imageBytes[1] == 0x50
            && imageBytes[2] == 0x4E
            && imageBytes[3] == 0x47) {
            return "PNG";
        }

        // Check GIF magic bytes (47 49 46)
        if (imageBytes.length >= 3 && imageBytes[0] == 0x47 && imageBytes[1] == 0x49 && imageBytes[2] == 0x46) {
            return "GIF";
        }

        // Check WebP magic bytes (RIFF container: 52 49 46 46 .. .. .. .. 57 45 42 50)
        if (imageBytes.length >= 12
            && imageBytes[0] == 0x52
            && imageBytes[1] == 0x49
            && imageBytes[8] == 0x57
            && imageBytes[9] == 0x45
            && imageBytes[10] == 0x42
            && imageBytes[11] == 0x50) {
            return "WebP";
        }

        return "Unknown";
    }
}
+ */ +public class Sample12_GetResultFileAsync extends ContentUnderstandingClientTestBase { + + /** + * Asynchronous sample for getting result files from a completed analysis operation. + *

+ * Note: The Azure Content Understanding service requires extended time after analysis + * completion for keyframe result files to become available. This test uses retry logic + * to handle the delay. + */ + @Test + public void testGetResultFileAsync() throws IOException { + + // BEGIN: com.azure.ai.contentunderstanding.getResultFileAsync + // For video analysis, use a video URL to get keyframes + String videoUrl + = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4"; + + // Step 1: Start the video analysis operation + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(videoUrl); + + PollerFlux poller + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-videoSearch", Arrays.asList(input)); + + System.out.println("Started analysis operation"); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + // Use AtomicReference to capture the operation ID from the polling response + AtomicReference operationIdRef = new AtomicReference<>(); + AnalyzeResult result = poller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + // Capture the operation ID for later use with getResultFile() + operationIdRef.set(pollResponse.getValue().getOperationId()); + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analysis completed successfully!"); + String operationId = operationIdRef.get(); + System.out.println("Operation ID: " + operationId); + + // END: com.azure.ai.contentunderstanding.getResultFileAsync + + // Verify operation started and completed + assertNotNull(videoUrl, "Video URL should not be null"); + System.out.println("Video URL: " + videoUrl); + + 
assertNotNull(operationId, "Operation ID should not be null"); + assertFalse(operationId.trim().isEmpty(), "Operation ID should not be empty"); + assertTrue(operationId.length() > 0, "Operation ID should have length > 0"); + assertFalse(operationId.contains(" "), "Operation ID should not contain spaces"); + System.out.println("Operation ID obtained: " + operationId); + System.out.println(" Length: " + operationId.length() + " characters"); + + // Verify result + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // BEGIN: com.azure.ai.contentunderstanding.getResultFileAsync.keyframes + // Step 2: Get keyframes from video analysis result + AudioVisualContent videoContent = null; + for (Object content : result.getContents()) { + if (content instanceof AudioVisualContent) { + videoContent = (AudioVisualContent) content; + break; + } + } + + if (videoContent != null + && videoContent.getKeyFrameTimesMs() != null + && !videoContent.getKeyFrameTimesMs().isEmpty()) { + List keyFrameTimes = videoContent.getKeyFrameTimesMs(); + System.out.println("Total keyframes: " + keyFrameTimes.size()); + + // Get the first keyframe + long firstFrameTimeMs = keyFrameTimes.get(0); + System.out.println("First keyframe time: " + firstFrameTimeMs + " ms"); + + // Construct the keyframe path + String framePath = "keyframes/" + firstFrameTimeMs; + System.out.println("Getting result file: " + framePath); + + // Retrieve the keyframe image using convenience method with retry logic + // Result files may not be immediately available after analysis completion + BinaryData fileData = null; + int maxRetries = 12; + int retryDelayMs = 10000; + for (int attempt = 1; attempt <= maxRetries; attempt++) { + try { + fileData = 
contentUnderstandingAsyncClient.getResultFile(operationId, framePath).block(); + break; // Success, exit retry loop + } catch (Exception e) { + if (attempt == maxRetries) { + throw e; // Re-throw on final attempt + } + System.out.println("Attempt " + attempt + " failed: " + e.getMessage()); + System.out.println("Waiting " + (retryDelayMs / 1000) + " seconds before retry..."); + try { + Thread.sleep(retryDelayMs); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Interrupted while waiting for retry", ie); + } + } + } + byte[] imageBytes = fileData.toBytes(); + System.out.println("Retrieved keyframe image (" + String.format("%,d", imageBytes.length) + " bytes)"); + + // Save the keyframe image + Path outputDir = Paths.get("target", "sample_output"); + Files.createDirectories(outputDir); + String outputFileName = "keyframe_" + firstFrameTimeMs + ".jpg"; + Path outputPath = outputDir.resolve(outputFileName); + Files.write(outputPath, imageBytes); + + System.out.println("Keyframe image saved to: " + outputPath.toAbsolutePath()); + // END: com.azure.ai.contentunderstanding.getResultFileAsync.keyframes + + // Verify video content + assertNotNull(videoContent, "Video content should not be null"); + assertNotNull(keyFrameTimes, "KeyFrameTimesMs should not be null"); + assertTrue(keyFrameTimes.size() > 0, "Should have at least one keyframe"); + System.out.println("\n🎬 Keyframe Information:"); + System.out.println("Total keyframes: " + keyFrameTimes.size()); + + // Verify keyframe times are valid + for (long frameTime : keyFrameTimes) { + assertTrue(frameTime >= 0, "Keyframe time should be non-negative, but was " + frameTime); + } + + // Get keyframe statistics + long lastFrameTimeMs = keyFrameTimes.get(keyFrameTimes.size() - 1); + double avgFrameInterval = keyFrameTimes.size() > 1 + ? 
(double) (lastFrameTimeMs - firstFrameTimeMs) / (keyFrameTimes.size() - 1) + : 0; + + assertTrue(firstFrameTimeMs >= 0, "First keyframe time should be >= 0"); + assertTrue(lastFrameTimeMs >= firstFrameTimeMs, "Last keyframe time should be >= first keyframe time"); + + System.out.println(" First keyframe: " + firstFrameTimeMs + " ms (" + + String.format("%.2f", firstFrameTimeMs / 1000.0) + " seconds)"); + System.out.println(" Last keyframe: " + lastFrameTimeMs + " ms (" + + String.format("%.2f", lastFrameTimeMs / 1000.0) + " seconds)"); + if (keyFrameTimes.size() > 1) { + System.out.println(" Average interval: " + String.format("%.2f", avgFrameInterval) + " ms"); + } + + // Verify file data + System.out.println("\n📥 File Data Verification:"); + assertNotNull(fileData, "File data should not be null"); + + // Verify image data + System.out.println("\nVerifying image data..."); + assertNotNull(imageBytes, "Image bytes should not be null"); + assertTrue(imageBytes.length > 0, "Image should have content"); + assertTrue(imageBytes.length >= 100, "Image should have reasonable size (>= 100 bytes)"); + System.out.println("Image size: " + String.format("%,d", imageBytes.length) + " bytes (" + + String.format("%.2f", imageBytes.length / 1024.0) + " KB)"); + + // Verify image format + String imageFormat = detectImageFormat(imageBytes); + System.out.println("Detected image format: " + imageFormat); + assertNotEquals("Unknown", imageFormat, "Image format should be recognized"); + + // Verify saved file + System.out.println("\n💾 Saved File Verification:"); + assertTrue(Files.exists(outputPath), "Saved file should exist"); + long fileSize = Files.size(outputPath); + assertTrue(fileSize > 0, "Saved file should have content"); + assertEquals(imageBytes.length, fileSize, "Saved file size should match image size"); + System.out.println("File saved: " + outputPath.toAbsolutePath()); + System.out.println("File size verified: " + String.format("%,d", fileSize) + " bytes"); + + // Verify 
file can be read back + byte[] readBackBytes = Files.readAllBytes(outputPath); + assertEquals(imageBytes.length, readBackBytes.length, "Read back file size should match original"); + System.out.println("File content verified (read back matches original)"); + + // Test additional keyframes if available + if (keyFrameTimes.size() > 1) { + System.out + .println("\nTesting additional keyframes (" + (keyFrameTimes.size() - 1) + " more available)..."); + int middleIndex = keyFrameTimes.size() / 2; + long middleFrameTimeMs = keyFrameTimes.get(middleIndex); + String middleFramePath = "keyframes/" + middleFrameTimeMs; + + BinaryData middleFileData + = contentUnderstandingAsyncClient.getResultFile(operationId, middleFramePath).block(); + assertNotNull(middleFileData, "Middle keyframe data should not be null"); + assertTrue(middleFileData.toBytes().length > 0, "Middle keyframe should have content"); + System.out.println( + "Successfully retrieved keyframe at index " + middleIndex + " (" + middleFrameTimeMs + " ms)"); + System.out.println(" Size: " + String.format("%,d", middleFileData.toBytes().length) + " bytes"); + } + + // Summary + System.out.println("\n✅ Keyframe retrieval verification completed successfully:"); + System.out.println(" Operation ID: " + operationId); + System.out.println(" Total keyframes: " + keyFrameTimes.size()); + System.out.println(" First keyframe time: " + firstFrameTimeMs + " ms"); + System.out.println(" Image format: " + imageFormat); + System.out.println(" Image size: " + String.format("%,d", imageBytes.length) + " bytes"); + System.out.println(" Saved to: " + outputPath.toAbsolutePath()); + System.out.println(" File verified: Yes"); + } else { + // No video content (expected for document analysis) + System.out.println("\n📚 GetResultFile API Usage Example:"); + System.out.println(" For video analysis with keyframes:"); + System.out.println(" 1. Analyze video with prebuilt-videoSearch"); + System.out.println(" 2. 
Get keyframe times from AudioVisualContent.getKeyFrameTimesMs()"); + System.out.println(" 3. Retrieve keyframes using getResultFile():"); + System.out.println(" BinaryData fileData = contentUnderstandingAsyncClient.getResultFile(\"" + + operationId + "\", \"keyframes/1000\").block();"); + System.out.println(" 4. Save or process the keyframe image"); + + // Verify content type + if (result.getContents().get(0) instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) result.getContents().get(0); + System.out.println("\nContent type: DocumentContent (as expected)"); + System.out.println(" MIME type: " + + (docContent.getMimeType() != null ? docContent.getMimeType() : "(not specified)")); + System.out + .println(" Pages: " + docContent.getStartPageNumber() + " - " + docContent.getEndPageNumber()); + } + + assertNotNull(operationId, "Operation ID should be available for GetResultFile API"); + assertFalse(operationId.trim().isEmpty(), "Operation ID should not be empty"); + System.out.println("Operation ID available for GetResultFile API: " + operationId); + } + } + + /** + * Detect image format from magic bytes. + */ + private String detectImageFormat(byte[] imageBytes) { + if (imageBytes.length < 2) { + return "Unknown"; + } + + // Check JPEG magic bytes (FF D8) + if (imageBytes[0] == (byte) 0xFF && imageBytes[1] == (byte) 0xD8) { + return "JPEG"; + } + + // Check PNG magic bytes (89 50 4E 47) + if (imageBytes.length >= 4 + && imageBytes[0] == (byte) 0x89 + && imageBytes[1] == 0x50 + && imageBytes[2] == 0x4E + && imageBytes[3] == 0x47) { + return "PNG"; + } + + // Check GIF magic bytes (47 49 46) + if (imageBytes.length >= 3 && imageBytes[0] == 0x47 && imageBytes[1] == 0x49 && imageBytes[2] == 0x46) { + return "GIF"; + } + + // Check WebP magic bytes (52 49 46 46 ... 
57 45 42 50) + if (imageBytes.length >= 12 + && imageBytes[0] == 0x52 + && imageBytes[1] == 0x49 + && imageBytes[8] == 0x57 + && imageBytes[9] == 0x45 + && imageBytes[10] == 0x42 + && imageBytes[11] == 0x50) { + return "WebP"; + } + + return "Unknown"; + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResult.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResult.java new file mode 100644 index 000000000000..ec2bc1056e32 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResult.java @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrates how to delete analysis results after they are no longer needed. + */ +public class Sample13_DeleteResult extends ContentUnderstandingClientTestBase { + + /** + * Synchronous sample for analyzing a document and then deleting the result. 
+ */ + @Test + public void testDeleteResult() { + + // BEGIN: com.azure.ai.contentunderstanding.deleteResult + // Step 1: Analyze a document + String documentUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + SyncPoller poller + = contentUnderstandingClient.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + // Wait for operation to complete to get a result ID + System.out.println("Started analysis operation"); + + // Wait for completion + AnalyzeResult result = poller.getFinalResult(); + System.out.println("Analysis completed successfully!"); + + // Get the operation ID using the getOperationId() convenience method + // This ID is extracted from the Operation-Location header and is needed for deleteResult() + String operationId = poller.poll().getValue().getOperationId(); + System.out.println("Operation ID: " + operationId); + + // Display some sample results using getValue() convenience method + if (result.getContents() != null && !result.getContents().isEmpty()) { + Object firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) firstContent; + Map fields = docContent.getFields(); + if (fields != null) { + System.out.println("Total fields extracted: " + fields.size()); + ContentField customerNameField = fields.get("CustomerName"); + if (customerNameField != null) { + // Use getValue() instead of casting to StringField + String customerName = (String) customerNameField.getValue(); + System.out.println("Customer Name: " + (customerName != null ? 
customerName : "(not found)")); + } + } + } + } + + // Step 2: Delete the analysis result using the operation ID + // This cleans up the server-side resources (including keyframe images for video analysis) + contentUnderstandingClient.deleteResult(operationId); + System.out.println("Analysis result deleted successfully!"); + // END: com.azure.ai.contentunderstanding.deleteResult + + // Verify operation + System.out.println("\n📋 Analysis Operation Verification:"); + assertNotNull(documentUrl, "Document URL should not be null"); + System.out.println("Document URL: " + documentUrl); + System.out.println("Analysis operation completed successfully"); + + // Verify result + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "Invoice should have exactly one content element"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // Verify document content + Object firstContent = result.getContents().get(0); + assertTrue(firstContent instanceof DocumentContent, "Content should be DocumentContent"); + DocumentContent documentContent = (DocumentContent) firstContent; + assertNotNull(documentContent.getFields(), "Document content should have fields"); + System.out.println("Document content has " + documentContent.getFields().size() + " field(s)"); + + // API Pattern Demo + System.out.println("\n🗑️ Result Deletion API Pattern:"); + System.out.println(" contentUnderstandingClient.deleteResultWithResponse(resultId, requestOptions)"); + System.out.println(" Use the result ID from the analysis operation for cleanup"); + + // Summary + System.out.println("\n✅ DeleteResult API pattern demonstrated:"); + System.out.println(" Analysis: Completed successfully"); + System.out.println(" Fields extracted: " + 
documentContent.getFields().size()); + System.out.println(" API: deleteResultWithResponse available for cleanup"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResultAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResultAsync.java new file mode 100644 index 000000000000..6043e84567b5 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample13_DeleteResultAsync.java @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerAnalyzeOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrates how to delete analysis results after they are no longer needed. + */ +public class Sample13_DeleteResultAsync extends ContentUnderstandingClientTestBase { + + /** + * Asynchronous sample for analyzing a document and then deleting the result. 
+ */ + @Test + public void testDeleteResultAsync() { + + // BEGIN: com.azure.ai.contentunderstanding.deleteResultAsync + // Step 1: Analyze a document + String documentUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(documentUrl); + + PollerFlux poller + = contentUnderstandingAsyncClient.beginAnalyze("prebuilt-invoice", Arrays.asList(input)); + + // Wait for operation to complete to get a result ID + System.out.println("Started analysis operation"); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + // Use AtomicReference to capture the operation ID from the polling response + AtomicReference operationIdRef = new AtomicReference<>(); + AnalyzeResult result = poller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + // Capture the operation ID for later use with deleteResult() + operationIdRef.set(pollResponse.getValue().getOperationId()); + return pollResponse.getFinalResult(); + } else { + return Mono.error( + new RuntimeException("Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analysis completed successfully!"); + String operationId = operationIdRef.get(); + System.out.println("Operation ID: " + operationId); + + // Display some sample results using getValue() convenience method + if (result.getContents() != null && !result.getContents().isEmpty()) { + Object firstContent = result.getContents().get(0); + if (firstContent instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) firstContent; + Map fields = docContent.getFields(); + if (fields != null) { + System.out.println("Total fields extracted: " + fields.size()); + ContentField customerNameField = 
fields.get("CustomerName"); + if (customerNameField != null) { + // Use getValue() instead of casting to StringField + String customerName = (String) customerNameField.getValue(); + System.out.println("Customer Name: " + (customerName != null ? customerName : "(not found)")); + } + } + } + } + + // Step 2: Delete the analysis result using the operation ID + // This cleans up the server-side resources (including keyframe images for video analysis) + contentUnderstandingAsyncClient.deleteResult(operationId).block(); + System.out.println("Analysis result deleted successfully!"); + // END: com.azure.ai.contentunderstanding.deleteResultAsync + + // Verify operation + System.out.println("\n📋 Analysis Operation Verification:"); + assertNotNull(documentUrl, "Document URL should not be null"); + System.out.println("Document URL: " + documentUrl); + System.out.println("Analysis operation completed successfully"); + + // Verify result + assertNotNull(result, "Analysis result should not be null"); + assertNotNull(result.getContents(), "Result should contain contents"); + assertTrue(result.getContents().size() > 0, "Result should have at least one content"); + assertEquals(1, result.getContents().size(), "Invoice should have exactly one content element"); + System.out.println("Analysis result contains " + result.getContents().size() + " content(s)"); + + // Verify document content + Object firstContent = result.getContents().get(0); + assertTrue(firstContent instanceof DocumentContent, "Content should be DocumentContent"); + DocumentContent documentContent = (DocumentContent) firstContent; + assertNotNull(documentContent.getFields(), "Document content should have fields"); + System.out.println("Document content has " + documentContent.getFields().size() + " field(s)"); + + // API Pattern Demo + System.out.println("\n🗑️ Result Deletion API Pattern:"); + System.out.println(" contentUnderstandingAsyncClient.deleteResult(resultId).block()"); + System.out.println(" Use the result ID 
from the analysis operation for cleanup"); + + // Summary + System.out.println("\n✅ DeleteResult API pattern demonstrated:"); + System.out.println(" Analysis: Completed successfully"); + System.out.println(" Fields extracted: " + documentContent.getFields().size()); + System.out.println(" API: deleteResult available for cleanup"); + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzer.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzer.java new file mode 100644 index 000000000000..847f0e22d799 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzer.java @@ -0,0 +1,370 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrates how to copy an analyzer within the same resource. + * For cross-resource copying, see Sample15_GrantCopyAuth. 
+ */ +public class Sample14_CopyAnalyzer extends ContentUnderstandingClientTestBase { + + /** + * Synchronous sample for copying an analyzer. + */ + @Test + public void testCopyAnalyzer() { + System.out.println("✓ Client initialized successfully"); + + // Generate unique analyzer IDs for this test + String sourceAnalyzerId = testResourceNamer.randomName("test_analyzer_source_", 50); + String targetAnalyzerId = testResourceNamer.randomName("test_analyzer_target_", 50); + + try { + // BEGIN: com.azure.ai.contentunderstanding.copyAnalyzer + // Step 1: Create the source analyzer + ContentAnalyzerConfig sourceConfig = new ContentAnalyzerConfig(); + sourceConfig.setEnableFormula(false); + sourceConfig.setEnableLayout(true); + sourceConfig.setEnableOcr(true); + sourceConfig.setEstimateFieldSourceAndConfidence(true); + sourceConfig.setReturnDetails(true); + + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema sourceFieldSchema = new ContentFieldSchema(); + sourceFieldSchema.setName("company_schema"); + sourceFieldSchema.setDescription("Schema for extracting company information"); + sourceFieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for copying"); + sourceAnalyzer.setConfig(sourceConfig); + sourceAnalyzer.setFieldSchema(sourceFieldSchema); + + 
Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + Map tags = new HashMap<>(); + tags.put("modelType", "in_development"); + sourceAnalyzer.setTags(tags); + + // Create source analyzer + SyncPoller createPoller + = contentUnderstandingClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + ContentAnalyzer sourceResult = createPoller.getFinalResult(); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + // Verify source analyzer is available before copying (ensure it's fully provisioned) + ContentAnalyzer verifiedSource = contentUnderstandingClient.getAnalyzer(sourceAnalyzerId); + System.out.println("Source analyzer verified: " + verifiedSource.getDescription()); + + // Step 2: Copy the source analyzer to target + // Note: This copies within the same resource using the simplified 2-parameter method. + ContentAnalyzer copiedAnalyzer = null; + try { + SyncPoller copyPoller + = contentUnderstandingClient.beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId); + copiedAnalyzer = copyPoller.getFinalResult(); + System.out.println("Analyzer copied to '" + targetAnalyzerId + "' successfully!"); + // END: com.azure.ai.contentunderstanding.copyAnalyzer + } catch (com.azure.core.exception.ResourceNotFoundException e) { + // Some Content Understanding endpoints may not support same-resource copy operations + // This is a service-side configuration, not a SDK bug + System.out.println("⚠️ Copy operation not supported on this endpoint."); + System.out.println(" Error: " + e.getMessage()); + System.out.println(" Note: For cross-resource copying, use Sample15_GrantCopyAuth."); + System.out.println("\n📋 CopyAnalyzer API Pattern Demonstrated:"); + System.out.println(" contentUnderstandingClient.beginCopyAnalyzer(targetId, sourceId);"); + System.out.println( + " For cross-resource: beginCopyAnalyzer(targetId, sourceId, allowReplace, sourceResourceId, sourceRegion);"); + return; // 
Skip the rest of the test + } + + // ========== VERIFICATION: Source Analyzer Creation ========== + System.out.println("\n📋 Source Analyzer Creation Verification:"); + + // Verify analyzer IDs + assertNotNull(sourceAnalyzerId, "Source analyzer ID should not be null"); + assertFalse(sourceAnalyzerId.trim().isEmpty(), "Source analyzer ID should not be empty"); + assertNotNull(targetAnalyzerId, "Target analyzer ID should not be null"); + assertFalse(targetAnalyzerId.trim().isEmpty(), "Target analyzer ID should not be empty"); + assertNotEquals(sourceAnalyzerId, targetAnalyzerId, "Source and target IDs should be different"); + System.out.println(" ✓ Analyzer IDs validated"); + System.out.println(" Source: " + sourceAnalyzerId); + System.out.println(" Target: " + targetAnalyzerId); + + // Verify source config + assertNotNull(sourceConfig, "Source config should not be null"); + assertEquals(false, sourceConfig.isEnableFormula(), "EnableFormula should be false"); + assertEquals(true, sourceConfig.isEnableLayout(), "EnableLayout should be true"); + assertEquals(true, sourceConfig.isEnableOcr(), "EnableOcr should be true"); + assertEquals(true, sourceConfig.isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should be true"); + assertEquals(true, sourceConfig.isReturnDetails(), "ReturnDetails should be true"); + System.out.println(" ✓ Source config verified"); + + // Verify source field schema + assertNotNull(sourceFieldSchema, "Source field schema should not be null"); + assertEquals("company_schema", sourceFieldSchema.getName(), "Field schema name should match"); + assertEquals("Schema for extracting company information", sourceFieldSchema.getDescription(), + "Field schema description should match"); + assertEquals(2, sourceFieldSchema.getFields().size(), "Should have 2 fields"); + System.out.println(" ✓ Source field schema verified: " + sourceFieldSchema.getName()); + + // Verify individual field definitions + 
assertTrue(sourceFieldSchema.getFields().containsKey("company_name"), "Should contain company_name field"); + ContentFieldDefinition companyField = sourceFieldSchema.getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, companyField.getType(), "company_name should be STRING type"); + assertEquals(GenerationMethod.EXTRACT, companyField.getMethod(), "company_name should use EXTRACT method"); + assertEquals("Name of the company", companyField.getDescription(), "company_name description should match"); + System.out.println(" ✓ company_name field verified"); + + assertTrue(sourceFieldSchema.getFields().containsKey("total_amount"), "Should contain total_amount field"); + ContentFieldDefinition amountField = sourceFieldSchema.getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, amountField.getType(), "total_amount should be NUMBER type"); + assertEquals(GenerationMethod.EXTRACT, amountField.getMethod(), "total_amount should use EXTRACT method"); + assertEquals("Total amount on the document", amountField.getDescription(), + "total_amount description should match"); + System.out.println(" ✓ total_amount field verified"); + + // Verify source analyzer object + assertNotNull(sourceAnalyzer, "Source analyzer object should not be null"); + assertEquals("prebuilt-document", sourceAnalyzer.getBaseAnalyzerId(), "Base analyzer ID should match"); + assertEquals("Source analyzer for copying", sourceAnalyzer.getDescription(), "Description should match"); + assertTrue(sourceAnalyzer.getModels().containsKey("completion"), "Should have completion model"); + assertEquals("gpt-4.1", sourceAnalyzer.getModels().get("completion"), "Completion model should be gpt-4.1"); + assertTrue(sourceAnalyzer.getTags().containsKey("modelType"), "Should have modelType tag"); + assertEquals("in_development", sourceAnalyzer.getTags().get("modelType"), + "modelType tag should be in_development"); + System.out.println(" ✓ Source analyzer object verified"); + + // Verify 
creation result + assertNotNull(sourceResult, "Source analyzer result should not be null"); + assertEquals("prebuilt-document", sourceResult.getBaseAnalyzerId(), "Base analyzer ID should match"); + assertEquals("Source analyzer for copying", sourceResult.getDescription(), "Description should match"); + System.out.println(" ✓ Source analyzer created: " + sourceAnalyzerId); + + // Verify config in result + assertNotNull(sourceResult.getConfig(), "Config should not be null in result"); + assertEquals(false, sourceResult.getConfig().isEnableFormula(), "EnableFormula should be preserved"); + assertEquals(true, sourceResult.getConfig().isEnableLayout(), "EnableLayout should be preserved"); + assertEquals(true, sourceResult.getConfig().isEnableOcr(), "EnableOcr should be preserved"); + System.out.println(" ✓ Config preserved in result"); + + // Verify field schema in result + assertNotNull(sourceResult.getFieldSchema(), "Field schema should not be null in result"); + assertEquals("company_schema", sourceResult.getFieldSchema().getName(), + "Field schema name should be preserved"); + assertEquals(2, sourceResult.getFieldSchema().getFields().size(), "Should have 2 fields in result"); + assertTrue(sourceResult.getFieldSchema().getFields().containsKey("company_name"), + "Should contain company_name in result"); + assertTrue(sourceResult.getFieldSchema().getFields().containsKey("total_amount"), + "Should contain total_amount in result"); + System.out + .println(" ✓ Field schema preserved: " + sourceResult.getFieldSchema().getFields().size() + " fields"); + + // Verify tags in result + assertNotNull(sourceResult.getTags(), "Tags should not be null in result"); + assertTrue(sourceResult.getTags().containsKey("modelType"), "Should contain modelType tag in result"); + assertEquals("in_development", sourceResult.getTags().get("modelType"), + "modelType tag should be preserved"); + System.out.println(" ✓ Tags preserved: " + sourceResult.getTags().size() + " tag(s)"); + + // Verify 
models in result + assertNotNull(sourceResult.getModels(), "Models should not be null in result"); + assertTrue(sourceResult.getModels().containsKey("completion"), "Should have completion model in result"); + assertEquals("gpt-4.1", sourceResult.getModels().get("completion"), "Completion model should be preserved"); + System.out.println(" ✓ Models preserved: " + sourceResult.getModels().size() + " model(s)"); + + System.out.println("\n✅ Source analyzer creation completed:"); + System.out.println(" ID: " + sourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + + // Get the source analyzer to verify retrieval + ContentAnalyzer sourceAnalyzerInfo = contentUnderstandingClient.getAnalyzer(sourceAnalyzerId); + + System.out.println("\n📋 Source Analyzer Retrieval Verification:"); + assertNotNull(sourceAnalyzerInfo, "Source analyzer info should not be null"); + assertEquals(sourceResult.getBaseAnalyzerId(), sourceAnalyzerInfo.getBaseAnalyzerId(), + "Base analyzer should match"); + assertEquals(sourceResult.getDescription(), sourceAnalyzerInfo.getDescription(), + "Description should match"); + System.out.println(" ✓ Source analyzer retrieved successfully"); + System.out.println(" Description: " + sourceAnalyzerInfo.getDescription()); + System.out.println(" Tags: " + String.join(", ", + sourceAnalyzerInfo.getTags() + .entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .toArray(String[]::new))); + + // ========== VERIFICATION: Analyzer Copy Operation ========== + System.out.println("\n📋 Analyzer Copy Verification:"); + assertNotNull(copiedAnalyzer, "Copied analyzer should not be null"); + System.out.println(" ✓ Copy operation completed"); + + // Verify base properties match source + 
assertEquals(sourceResult.getBaseAnalyzerId(), copiedAnalyzer.getBaseAnalyzerId(), + "Copied analyzer should have same base analyzer ID"); + assertEquals(sourceResult.getDescription(), copiedAnalyzer.getDescription(), + "Copied analyzer should have same description"); + System.out.println(" ✓ Base properties preserved"); + System.out.println(" Base analyzer ID: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: '" + copiedAnalyzer.getDescription() + "'"); + + // Verify field schema structure + assertNotNull(copiedAnalyzer.getFieldSchema(), "Copied analyzer should have field schema"); + assertEquals(sourceResult.getFieldSchema().getName(), copiedAnalyzer.getFieldSchema().getName(), + "Field schema name should match"); + assertEquals(sourceResult.getFieldSchema().getDescription(), + copiedAnalyzer.getFieldSchema().getDescription(), "Field schema description should match"); + assertEquals(sourceResult.getFieldSchema().getFields().size(), + copiedAnalyzer.getFieldSchema().getFields().size(), "Field count should match"); + System.out.println(" ✓ Field schema structure preserved"); + System.out.println(" Schema: " + copiedAnalyzer.getFieldSchema().getName()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + + // Verify individual field definitions were copied correctly + assertTrue(copiedAnalyzer.getFieldSchema().getFields().containsKey("company_name"), + "Copied analyzer should contain company_name field"); + ContentFieldDefinition copiedCompanyField = copiedAnalyzer.getFieldSchema().getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, copiedCompanyField.getType(), + "company_name type should be preserved"); + assertEquals(GenerationMethod.EXTRACT, copiedCompanyField.getMethod(), + "company_name method should be preserved"); + System.out.println( + " ✓ company_name field: " + copiedCompanyField.getType() + " / " + copiedCompanyField.getMethod()); + + 
assertTrue(copiedAnalyzer.getFieldSchema().getFields().containsKey("total_amount"), + "Copied analyzer should contain total_amount field"); + ContentFieldDefinition copiedAmountField = copiedAnalyzer.getFieldSchema().getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, copiedAmountField.getType(), "total_amount type should be preserved"); + assertEquals(GenerationMethod.EXTRACT, copiedAmountField.getMethod(), + "total_amount method should be preserved"); + System.out.println( + " ✓ total_amount field: " + copiedAmountField.getType() + " / " + copiedAmountField.getMethod()); + + // Verify tags were copied + assertNotNull(copiedAnalyzer.getTags(), "Copied analyzer should have tags"); + assertEquals(sourceResult.getTags().size(), copiedAnalyzer.getTags().size(), "Tag count should match"); + assertTrue(copiedAnalyzer.getTags().containsKey("modelType"), + "Copied analyzer should contain modelType tag"); + assertEquals("in_development", copiedAnalyzer.getTags().get("modelType"), + "Copied analyzer should have same tag value"); + System.out.println(" ✓ Tags preserved: " + copiedAnalyzer.getTags().size() + " tag(s)"); + System.out.println(" modelType=" + copiedAnalyzer.getTags().get("modelType")); + + // Verify config was copied + assertNotNull(copiedAnalyzer.getConfig(), "Copied analyzer should have config"); + assertEquals(sourceResult.getConfig().isEnableFormula(), copiedAnalyzer.getConfig().isEnableFormula(), + "EnableFormula should match"); + assertEquals(sourceResult.getConfig().isEnableLayout(), copiedAnalyzer.getConfig().isEnableLayout(), + "EnableLayout should match"); + assertEquals(sourceResult.getConfig().isEnableOcr(), copiedAnalyzer.getConfig().isEnableOcr(), + "EnableOcr should match"); + assertEquals(sourceResult.getConfig().isEstimateFieldSourceAndConfidence(), + copiedAnalyzer.getConfig().isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should match"); + assertEquals(sourceResult.getConfig().isReturnDetails(), 
copiedAnalyzer.getConfig().isReturnDetails(), + "ReturnDetails should match"); + System.out.println(" ✓ Config preserved"); + System.out.println(" EnableLayout: " + copiedAnalyzer.getConfig().isEnableLayout()); + System.out.println(" EnableOcr: " + copiedAnalyzer.getConfig().isEnableOcr()); + + // Verify models were copied + assertNotNull(copiedAnalyzer.getModels(), "Copied analyzer should have models"); + assertEquals(sourceResult.getModels().size(), copiedAnalyzer.getModels().size(), + "Model count should match"); + if (copiedAnalyzer.getModels().containsKey("completion")) { + assertEquals("gpt-4.1", copiedAnalyzer.getModels().get("completion"), "Completion model should match"); + System.out.println(" ✓ Models preserved: " + copiedAnalyzer.getModels().size() + " model(s)"); + System.out.println(" completion=" + copiedAnalyzer.getModels().get("completion")); + } + + // Verify the copied analyzer via Get operation + ContentAnalyzer verifiedCopy = contentUnderstandingClient.getAnalyzer(targetAnalyzerId); + + System.out.println("\n📋 Copied Analyzer Retrieval Verification:"); + assertNotNull(verifiedCopy, "Retrieved copied analyzer should not be null"); + assertEquals(copiedAnalyzer.getBaseAnalyzerId(), verifiedCopy.getBaseAnalyzerId(), + "Retrieved analyzer should match copied analyzer"); + assertEquals(copiedAnalyzer.getDescription(), verifiedCopy.getDescription(), + "Retrieved description should match"); + assertEquals(copiedAnalyzer.getFieldSchema().getFields().size(), + verifiedCopy.getFieldSchema().getFields().size(), "Retrieved field count should match"); + System.out.println(" ✓ Copied analyzer verified via retrieval"); + + // Summary + String separator = new String(new char[60]).replace("\0", "═"); + System.out.println("\n" + separator); + System.out.println("✅ ANALYZER COPY VERIFICATION COMPLETED SUCCESSFULLY"); + System.out.println(separator); + System.out.println("Source Analyzer:"); + System.out.println(" ID: " + sourceAnalyzerId); + System.out.println(" 
Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Description: " + sourceResult.getDescription()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + System.out.println("\nTarget Analyzer (Copied):"); + System.out.println(" ID: " + targetAnalyzerId); + System.out.println(" Base: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: " + copiedAnalyzer.getDescription()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + copiedAnalyzer.getTags().size()); + System.out.println(" Models: " + copiedAnalyzer.getModels().size()); + System.out.println("\n✅ All properties successfully copied and verified!"); + System.out.println(separator); + + } finally { + // Cleanup: Delete the analyzers + try { + contentUnderstandingClient.deleteAnalyzer(sourceAnalyzerId); + System.out.println("\nSource analyzer deleted: " + sourceAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete source analyzer (may not exist): " + e.getMessage()); + } + + try { + contentUnderstandingClient.deleteAnalyzer(targetAnalyzerId); + System.out.println("Target analyzer deleted: " + targetAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete target analyzer (may not exist): " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzerAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzerAsync.java new file mode 100644 index 000000000000..2b595a787902 --- /dev/null +++ 
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample14_CopyAnalyzerAsync.java @@ -0,0 +1,392 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.util.polling.PollerFlux; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrates how to copy an analyzer within the same resource. + * For cross-resource copying, see Sample15_GrantCopyAuthAsync. + */ +public class Sample14_CopyAnalyzerAsync extends ContentUnderstandingClientTestBase { + + /** + * Asynchronous sample for copying an analyzer. 
+ */ + @Test + public void testCopyAnalyzerAsync() { + System.out.println("✓ Client initialized successfully"); + + // Generate unique analyzer IDs for this test + String sourceAnalyzerId = testResourceNamer.randomName("test_analyzer_source_", 50); + String targetAnalyzerId = testResourceNamer.randomName("test_analyzer_target_", 50); + + try { + // BEGIN: com.azure.ai.contentunderstanding.copyAnalyzerAsync + // Step 1: Create the source analyzer + ContentAnalyzerConfig sourceConfig = new ContentAnalyzerConfig(); + sourceConfig.setEnableFormula(false); + sourceConfig.setEnableLayout(true); + sourceConfig.setEnableOcr(true); + sourceConfig.setEstimateFieldSourceAndConfidence(true); + sourceConfig.setReturnDetails(true); + + Map fields = new HashMap<>(); + + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema sourceFieldSchema = new ContentFieldSchema(); + sourceFieldSchema.setName("company_schema"); + sourceFieldSchema.setDescription("Schema for extracting company information"); + sourceFieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for copying"); + sourceAnalyzer.setConfig(sourceConfig); + sourceAnalyzer.setFieldSchema(sourceFieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + Map tags = new 
HashMap<>(); + tags.put("modelType", "in_development"); + sourceAnalyzer.setTags(tags); + + // Create source analyzer + PollerFlux createPoller + = contentUnderstandingAsyncClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + ContentAnalyzer sourceResult = createPoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + // Verify source analyzer is available before copying (ensure it's fully provisioned) + ContentAnalyzer verifiedSource = contentUnderstandingAsyncClient.getAnalyzer(sourceAnalyzerId).block(); + System.out.println("Source analyzer verified: " + verifiedSource.getDescription()); + + // Step 2: Copy the source analyzer to target + // Note: This copies within the same resource using the simplified 2-parameter method. 
+ ContentAnalyzer copiedAnalyzer = null; + try { + PollerFlux copyPoller + = contentUnderstandingAsyncClient.beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId); + + // Use reactive pattern for copy operation as well + copiedAnalyzer = copyPoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analyzer copied to '" + targetAnalyzerId + "' successfully!"); + // END: com.azure.ai.contentunderstanding.copyAnalyzerAsync + } catch (com.azure.core.exception.ResourceNotFoundException e) { + // Some Content Understanding endpoints may not support same-resource copy operations + // This is a service-side configuration, not a SDK bug + System.out.println("⚠️ Copy operation not supported on this endpoint."); + System.out.println(" Error: " + e.getMessage()); + System.out.println(" Note: For cross-resource copying, use Sample15_GrantCopyAuthAsync."); + System.out.println("\n📋 CopyAnalyzer API Pattern Demonstrated:"); + System.out.println(" contentUnderstandingAsyncClient.beginCopyAnalyzer(targetId, sourceId);"); + System.out.println( + " For cross-resource: beginCopyAnalyzer(targetId, sourceId, allowReplace, sourceResourceId, sourceRegion);"); + return; // Skip the rest of the test + } + + // ========== VERIFICATION: Source Analyzer Creation ========== + System.out.println("\n📋 Source Analyzer Creation Verification:"); + + // Verify analyzer IDs + assertNotNull(sourceAnalyzerId, "Source analyzer ID should not be null"); + assertFalse(sourceAnalyzerId.trim().isEmpty(), "Source analyzer ID should not be empty"); + assertNotNull(targetAnalyzerId, "Target analyzer ID should not be null"); + assertFalse(targetAnalyzerId.trim().isEmpty(), "Target analyzer ID should not 
be empty"); + assertNotEquals(sourceAnalyzerId, targetAnalyzerId, "Source and target IDs should be different"); + System.out.println(" ✓ Analyzer IDs validated"); + System.out.println(" Source: " + sourceAnalyzerId); + System.out.println(" Target: " + targetAnalyzerId); + + // Verify source config + assertNotNull(sourceConfig, "Source config should not be null"); + assertEquals(false, sourceConfig.isEnableFormula(), "EnableFormula should be false"); + assertEquals(true, sourceConfig.isEnableLayout(), "EnableLayout should be true"); + assertEquals(true, sourceConfig.isEnableOcr(), "EnableOcr should be true"); + assertEquals(true, sourceConfig.isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should be true"); + assertEquals(true, sourceConfig.isReturnDetails(), "ReturnDetails should be true"); + System.out.println(" ✓ Source config verified"); + + // Verify source field schema + assertNotNull(sourceFieldSchema, "Source field schema should not be null"); + assertEquals("company_schema", sourceFieldSchema.getName(), "Field schema name should match"); + assertEquals("Schema for extracting company information", sourceFieldSchema.getDescription(), + "Field schema description should match"); + assertEquals(2, sourceFieldSchema.getFields().size(), "Should have 2 fields"); + System.out.println(" ✓ Source field schema verified: " + sourceFieldSchema.getName()); + + // Verify individual field definitions + assertTrue(sourceFieldSchema.getFields().containsKey("company_name"), "Should contain company_name field"); + ContentFieldDefinition companyField = sourceFieldSchema.getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, companyField.getType(), "company_name should be STRING type"); + assertEquals(GenerationMethod.EXTRACT, companyField.getMethod(), "company_name should use EXTRACT method"); + assertEquals("Name of the company", companyField.getDescription(), "company_name description should match"); + System.out.println(" ✓ 
company_name field verified"); + + assertTrue(sourceFieldSchema.getFields().containsKey("total_amount"), "Should contain total_amount field"); + ContentFieldDefinition amountField = sourceFieldSchema.getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, amountField.getType(), "total_amount should be NUMBER type"); + assertEquals(GenerationMethod.EXTRACT, amountField.getMethod(), "total_amount should use EXTRACT method"); + assertEquals("Total amount on the document", amountField.getDescription(), + "total_amount description should match"); + System.out.println(" ✓ total_amount field verified"); + + // Verify source analyzer object + assertNotNull(sourceAnalyzer, "Source analyzer object should not be null"); + assertEquals("prebuilt-document", sourceAnalyzer.getBaseAnalyzerId(), "Base analyzer ID should match"); + assertEquals("Source analyzer for copying", sourceAnalyzer.getDescription(), "Description should match"); + assertTrue(sourceAnalyzer.getModels().containsKey("completion"), "Should have completion model"); + assertEquals("gpt-4.1", sourceAnalyzer.getModels().get("completion"), "Completion model should be gpt-4.1"); + assertTrue(sourceAnalyzer.getTags().containsKey("modelType"), "Should have modelType tag"); + assertEquals("in_development", sourceAnalyzer.getTags().get("modelType"), + "modelType tag should be in_development"); + System.out.println(" ✓ Source analyzer object verified"); + + // Verify creation result + assertNotNull(sourceResult, "Source analyzer result should not be null"); + assertEquals("prebuilt-document", sourceResult.getBaseAnalyzerId(), "Base analyzer ID should match"); + assertEquals("Source analyzer for copying", sourceResult.getDescription(), "Description should match"); + System.out.println(" ✓ Source analyzer created: " + sourceAnalyzerId); + + // Verify config in result + assertNotNull(sourceResult.getConfig(), "Config should not be null in result"); + assertEquals(false, 
sourceResult.getConfig().isEnableFormula(), "EnableFormula should be preserved"); + assertEquals(true, sourceResult.getConfig().isEnableLayout(), "EnableLayout should be preserved"); + assertEquals(true, sourceResult.getConfig().isEnableOcr(), "EnableOcr should be preserved"); + System.out.println(" ✓ Config preserved in result"); + + // Verify field schema in result + assertNotNull(sourceResult.getFieldSchema(), "Field schema should not be null in result"); + assertEquals("company_schema", sourceResult.getFieldSchema().getName(), + "Field schema name should be preserved"); + assertEquals(2, sourceResult.getFieldSchema().getFields().size(), "Should have 2 fields in result"); + assertTrue(sourceResult.getFieldSchema().getFields().containsKey("company_name"), + "Should contain company_name in result"); + assertTrue(sourceResult.getFieldSchema().getFields().containsKey("total_amount"), + "Should contain total_amount in result"); + System.out + .println(" ✓ Field schema preserved: " + sourceResult.getFieldSchema().getFields().size() + " fields"); + + // Verify tags in result + assertNotNull(sourceResult.getTags(), "Tags should not be null in result"); + assertTrue(sourceResult.getTags().containsKey("modelType"), "Should contain modelType tag in result"); + assertEquals("in_development", sourceResult.getTags().get("modelType"), + "modelType tag should be preserved"); + System.out.println(" ✓ Tags preserved: " + sourceResult.getTags().size() + " tag(s)"); + + // Verify models in result + assertNotNull(sourceResult.getModels(), "Models should not be null in result"); + assertTrue(sourceResult.getModels().containsKey("completion"), "Should have completion model in result"); + assertEquals("gpt-4.1", sourceResult.getModels().get("completion"), "Completion model should be preserved"); + System.out.println(" ✓ Models preserved: " + sourceResult.getModels().size() + " model(s)"); + + System.out.println("\n✅ Source analyzer creation completed:"); + System.out.println(" ID: " + 
sourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + + // Get the source analyzer to verify retrieval + ContentAnalyzer sourceAnalyzerInfo = contentUnderstandingAsyncClient.getAnalyzer(sourceAnalyzerId).block(); + + System.out.println("\n📋 Source Analyzer Retrieval Verification:"); + assertNotNull(sourceAnalyzerInfo, "Source analyzer info should not be null"); + assertEquals(sourceResult.getBaseAnalyzerId(), sourceAnalyzerInfo.getBaseAnalyzerId(), + "Base analyzer should match"); + assertEquals(sourceResult.getDescription(), sourceAnalyzerInfo.getDescription(), + "Description should match"); + System.out.println(" ✓ Source analyzer retrieved successfully"); + System.out.println(" Description: " + sourceAnalyzerInfo.getDescription()); + System.out.println(" Tags: " + String.join(", ", + sourceAnalyzerInfo.getTags() + .entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .toArray(String[]::new))); + + // ========== VERIFICATION: Analyzer Copy Operation ========== + System.out.println("\n📋 Analyzer Copy Verification:"); + assertNotNull(copiedAnalyzer, "Copied analyzer should not be null"); + System.out.println(" ✓ Copy operation completed"); + + // Verify base properties match source + assertEquals(sourceResult.getBaseAnalyzerId(), copiedAnalyzer.getBaseAnalyzerId(), + "Copied analyzer should have same base analyzer ID"); + assertEquals(sourceResult.getDescription(), copiedAnalyzer.getDescription(), + "Copied analyzer should have same description"); + System.out.println(" ✓ Base properties preserved"); + System.out.println(" Base analyzer ID: " + copiedAnalyzer.getBaseAnalyzerId()); + System.out.println(" Description: '" + copiedAnalyzer.getDescription() + "'"); + + // Verify field schema structure + 
assertNotNull(copiedAnalyzer.getFieldSchema(), "Copied analyzer should have field schema"); + assertEquals(sourceResult.getFieldSchema().getName(), copiedAnalyzer.getFieldSchema().getName(), + "Field schema name should match"); + assertEquals(sourceResult.getFieldSchema().getDescription(), + copiedAnalyzer.getFieldSchema().getDescription(), "Field schema description should match"); + assertEquals(sourceResult.getFieldSchema().getFields().size(), + copiedAnalyzer.getFieldSchema().getFields().size(), "Field count should match"); + System.out.println(" ✓ Field schema structure preserved"); + System.out.println(" Schema: " + copiedAnalyzer.getFieldSchema().getName()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + + // Verify individual field definitions were copied correctly + assertTrue(copiedAnalyzer.getFieldSchema().getFields().containsKey("company_name"), + "Copied analyzer should contain company_name field"); + ContentFieldDefinition copiedCompanyField = copiedAnalyzer.getFieldSchema().getFields().get("company_name"); + assertEquals(ContentFieldType.STRING, copiedCompanyField.getType(), + "company_name type should be preserved"); + assertEquals(GenerationMethod.EXTRACT, copiedCompanyField.getMethod(), + "company_name method should be preserved"); + System.out.println( + " ✓ company_name field: " + copiedCompanyField.getType() + " / " + copiedCompanyField.getMethod()); + + assertTrue(copiedAnalyzer.getFieldSchema().getFields().containsKey("total_amount"), + "Copied analyzer should contain total_amount field"); + ContentFieldDefinition copiedAmountField = copiedAnalyzer.getFieldSchema().getFields().get("total_amount"); + assertEquals(ContentFieldType.NUMBER, copiedAmountField.getType(), "total_amount type should be preserved"); + assertEquals(GenerationMethod.EXTRACT, copiedAmountField.getMethod(), + "total_amount method should be preserved"); + System.out.println( + " ✓ total_amount field: " + copiedAmountField.getType() 
+ " / " + copiedAmountField.getMethod()); + + // Verify tags were copied + assertNotNull(copiedAnalyzer.getTags(), "Copied analyzer should have tags"); + assertEquals(sourceResult.getTags().size(), copiedAnalyzer.getTags().size(), "Tag count should match"); + assertTrue(copiedAnalyzer.getTags().containsKey("modelType"), + "Copied analyzer should contain modelType tag"); + assertEquals("in_development", copiedAnalyzer.getTags().get("modelType"), + "Copied analyzer should have same tag value"); + System.out.println(" ✓ Tags preserved: " + copiedAnalyzer.getTags().size() + " tag(s)"); + System.out.println(" modelType=" + copiedAnalyzer.getTags().get("modelType")); + + // Verify config was copied + assertNotNull(copiedAnalyzer.getConfig(), "Copied analyzer should have config"); + assertEquals(sourceResult.getConfig().isEnableFormula(), copiedAnalyzer.getConfig().isEnableFormula(), + "EnableFormula should match"); + assertEquals(sourceResult.getConfig().isEnableLayout(), copiedAnalyzer.getConfig().isEnableLayout(), + "EnableLayout should match"); + assertEquals(sourceResult.getConfig().isEnableOcr(), copiedAnalyzer.getConfig().isEnableOcr(), + "EnableOcr should match"); + assertEquals(sourceResult.getConfig().isEstimateFieldSourceAndConfidence(), + copiedAnalyzer.getConfig().isEstimateFieldSourceAndConfidence(), + "EstimateFieldSourceAndConfidence should match"); + assertEquals(sourceResult.getConfig().isReturnDetails(), copiedAnalyzer.getConfig().isReturnDetails(), + "ReturnDetails should match"); + System.out.println(" ✓ Config preserved"); + System.out.println(" EnableLayout: " + copiedAnalyzer.getConfig().isEnableLayout()); + System.out.println(" EnableOcr: " + copiedAnalyzer.getConfig().isEnableOcr()); + + // Verify models were copied + assertNotNull(copiedAnalyzer.getModels(), "Copied analyzer should have models"); + assertEquals(sourceResult.getModels().size(), copiedAnalyzer.getModels().size(), + "Model count should match"); + if 
(copiedAnalyzer.getModels().containsKey("completion")) { + assertEquals("gpt-4.1", copiedAnalyzer.getModels().get("completion"), "Completion model should match"); + System.out.println(" ✓ Models preserved: " + copiedAnalyzer.getModels().size() + " model(s)"); + System.out.println(" completion=" + copiedAnalyzer.getModels().get("completion")); + } + + // Verify the copied analyzer via Get operation + ContentAnalyzer verifiedCopy = contentUnderstandingAsyncClient.getAnalyzer(targetAnalyzerId).block(); + + System.out.println("\n📋 Copied Analyzer Retrieval Verification:"); + assertNotNull(verifiedCopy, "Retrieved copied analyzer should not be null"); + assertEquals(copiedAnalyzer.getBaseAnalyzerId(), verifiedCopy.getBaseAnalyzerId(), + "Retrieved analyzer should match copied analyzer"); + assertEquals(copiedAnalyzer.getDescription(), verifiedCopy.getDescription(), + "Retrieved description should match"); + assertEquals(copiedAnalyzer.getFieldSchema().getFields().size(), + verifiedCopy.getFieldSchema().getFields().size(), "Retrieved field count should match"); + System.out.println(" ✓ Copied analyzer verified via retrieval"); + + // Summary + String separator = new String(new char[60]).replace("\0", "═"); + System.out.println("\n" + separator); + System.out.println("✅ ANALYZER COPY VERIFICATION COMPLETED SUCCESSFULLY"); + System.out.println(separator); + System.out.println("Source Analyzer:"); + System.out.println(" ID: " + sourceAnalyzerId); + System.out.println(" Base: " + sourceResult.getBaseAnalyzerId()); + System.out.println(" Description: " + sourceResult.getDescription()); + System.out.println(" Fields: " + sourceResult.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + sourceResult.getTags().size()); + System.out.println(" Models: " + sourceResult.getModels().size()); + System.out.println("\nTarget Analyzer (Copied):"); + System.out.println(" ID: " + targetAnalyzerId); + System.out.println(" Base: " + copiedAnalyzer.getBaseAnalyzerId()); + 
System.out.println(" Description: " + copiedAnalyzer.getDescription()); + System.out.println(" Fields: " + copiedAnalyzer.getFieldSchema().getFields().size()); + System.out.println(" Tags: " + copiedAnalyzer.getTags().size()); + System.out.println(" Models: " + copiedAnalyzer.getModels().size()); + System.out.println("\n✅ All properties successfully copied and verified!"); + System.out.println(separator); + + } finally { + // Cleanup: Delete the analyzers + try { + contentUnderstandingAsyncClient.deleteAnalyzer(sourceAnalyzerId).block(); + System.out.println("\nSource analyzer deleted: " + sourceAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete source analyzer (may not exist): " + e.getMessage()); + } + + try { + contentUnderstandingAsyncClient.deleteAnalyzer(targetAnalyzerId).block(); + System.out.println("Target analyzer deleted: " + targetAnalyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete target analyzer (may not exist): " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuth.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuth.java new file mode 100644 index 000000000000..707a32d9e949 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuth.java @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.CopyAuthorization; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.test.annotation.LiveOnly; +import com.azure.core.util.polling.SyncPoller; +import com.azure.identity.DefaultAzureCredentialBuilder; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrates how to grant copy authorization and copy an analyzer from a source + * Microsoft Foundry resource to a target Microsoft Foundry resource (cross-resource copying). + * + *

For same-resource copying, see Sample14_CopyAnalyzer.

+ * + *

Required environment variables for cross-resource copying:

+ *
    + *
  • SOURCE_RESOURCE_ID: Azure resource ID of the source resource
  • + *
  • SOURCE_REGION: Region of the source resource
  • + *
  • TARGET_ENDPOINT: Endpoint of the target resource
  • + *
  • TARGET_KEY (optional): API key for target resource
  • + *
  • TARGET_RESOURCE_ID: Azure resource ID of the target resource
  • + *
  • TARGET_REGION: Region of the target resource
  • + *
+ * + *

Note: If API key is not provided, DefaultAzureCredential will be used. + * Cross-resource copying with DefaultAzureCredential requires 'Cognitive Services User' role + * on both source and target resources.

+ */ +public class Sample15_GrantCopyAuth extends ContentUnderstandingClientTestBase { + + /** + * Demonstrates cross-resource copying with actual resource information. + * + * This test is marked as LiveOnly because it requires connecting to two separate + * Azure resources, which cannot be reliably replayed in PLAYBACK mode. + */ + @LiveOnly + @Test + public void testCrossResourceCopy() { + // Check for required environment variables (matching samples naming convention) + String sourceResourceId = System.getenv("SOURCE_RESOURCE_ID"); + String sourceRegion = System.getenv("SOURCE_REGION"); + String targetEndpoint = System.getenv("TARGET_ENDPOINT"); + String targetKey = System.getenv("TARGET_KEY"); + String targetResourceId = System.getenv("TARGET_RESOURCE_ID"); + String targetRegion = System.getenv("TARGET_REGION"); + + if (sourceResourceId == null + || sourceRegion == null + || targetEndpoint == null + || targetResourceId == null + || targetRegion == null) { + System.out.println("⚠️ Cross-resource copying requires environment variables:"); + System.out.println(" SOURCE_RESOURCE_ID, SOURCE_REGION"); + System.out.println(" TARGET_ENDPOINT, TARGET_KEY (optional), TARGET_RESOURCE_ID, TARGET_REGION"); + System.out.println(" Skipping cross-resource copy test."); + return; + } + + // Build target client with appropriate authentication + ContentUnderstandingClientBuilder targetBuilder + = new ContentUnderstandingClientBuilder().endpoint(targetEndpoint); + ContentUnderstandingClient targetClient; + if (targetKey != null && !targetKey.trim().isEmpty()) { + targetClient = targetBuilder.credential(new AzureKeyCredential(targetKey)).buildClient(); + } else { + targetClient = targetBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildClient(); + } + + String sourceAnalyzerId = testResourceNamer.randomName("test_cross_resource_source_", 50); + String targetAnalyzerId = testResourceNamer.randomName("test_cross_resource_target_", 50); + + try { + // Step 1: Create 
source analyzer + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableLayout(true); + config.setEnableOcr(true); + + Map fields = new HashMap<>(); + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for cross-resource copying"); + sourceAnalyzer.setConfig(config); + sourceAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + SyncPoller createPoller + = contentUnderstandingClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + ContentAnalyzer sourceResult = createPoller.getFinalResult(); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + // Step 2: Grant copy authorization using convenience method + CopyAuthorization copyAuth + = contentUnderstandingClient.grantCopyAuthorization(sourceAnalyzerId, targetResourceId, targetRegion); + + assertNotNull(copyAuth, "Copy authorization should not be null"); + System.out.println("Copy authorization granted!"); + System.out.println(" Target Azure Resource ID: 
" + copyAuth.getTargetAzureResourceId()); + System.out.println(" Expires at: " + copyAuth.getExpiresAt()); + + // Step 3: Copy analyzer to target resource using convenience method + SyncPoller copyPoller = targetClient + .beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId, false, sourceResourceId, sourceRegion); + ContentAnalyzer targetResult = copyPoller.getFinalResult(); + + System.out.println("Target analyzer '" + targetAnalyzerId + "' copied successfully!"); + System.out.println(" Description: " + targetResult.getDescription()); + + // Verify copied analyzer + ContentAnalyzer copiedAnalyzer = targetClient.getAnalyzer(targetAnalyzerId); + assertNotNull(copiedAnalyzer, "Copied analyzer should not be null"); + assertEquals(sourceResult.getBaseAnalyzerId(), copiedAnalyzer.getBaseAnalyzerId()); + assertEquals(sourceResult.getDescription(), copiedAnalyzer.getDescription()); + System.out.println("Cross-resource copy verification completed"); + + } finally { + // Cleanup: delete both analyzers + try { + contentUnderstandingClient.deleteAnalyzer(sourceAnalyzerId); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup errors + } + + try { + targetClient.deleteAnalyzer(targetAnalyzerId); + System.out.println("Target analyzer '" + targetAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuthAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuthAsync.java new file mode 100644 index 000000000000..c4d1e16c4585 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample15_GrantCopyAuthAsync.java @@ -0,0 +1,195 @@ +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.ContentUnderstandingAsyncClient; +import com.azure.ai.contentunderstanding.ContentUnderstandingClientBuilder; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerOperationStatus; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.CopyAuthorization; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.test.annotation.LiveOnly; +import com.azure.core.util.polling.PollerFlux; +import com.azure.identity.DefaultAzureCredentialBuilder; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrates how to grant copy authorization and copy an analyzer from a source + * Microsoft Foundry resource to a target Microsoft Foundry resource (cross-resource copying). + * + *

For same-resource copying, see Sample14_CopyAnalyzerAsync.

+ * + *

Required environment variables for cross-resource copying:

+ *
    + *
  • SOURCE_RESOURCE_ID: Azure resource ID of the source resource
  • + *
  • SOURCE_REGION: Region of the source resource
  • + *
  • TARGET_ENDPOINT: Endpoint of the target resource
  • + *
  • TARGET_KEY (optional): API key for target resource
  • + *
  • TARGET_RESOURCE_ID: Azure resource ID of the target resource
  • + *
  • TARGET_REGION: Region of the target resource
  • + *
+ * + *

Note: If API key is not provided, DefaultAzureCredential will be used. + * Cross-resource copying with DefaultAzureCredential requires 'Cognitive Services User' role + * on both source and target resources.

+ */ +public class Sample15_GrantCopyAuthAsync extends ContentUnderstandingClientTestBase { + + /** + * Demonstrates cross-resource copying with actual resource information. + * + * This test is marked as LiveOnly because it requires connecting to two separate + * Azure resources, which cannot be reliably replayed in PLAYBACK mode. + */ + @LiveOnly + @Test + public void testCrossResourceCopyAsync() { + // Check for required environment variables (matching samples naming convention) + String sourceResourceId = System.getenv("SOURCE_RESOURCE_ID"); + String sourceRegion = System.getenv("SOURCE_REGION"); + String targetEndpoint = System.getenv("TARGET_ENDPOINT"); + String targetKey = System.getenv("TARGET_KEY"); + String targetResourceId = System.getenv("TARGET_RESOURCE_ID"); + String targetRegion = System.getenv("TARGET_REGION"); + + if (sourceResourceId == null + || sourceRegion == null + || targetEndpoint == null + || targetResourceId == null + || targetRegion == null) { + System.out.println("⚠️ Cross-resource copying requires environment variables:"); + System.out.println(" SOURCE_RESOURCE_ID, SOURCE_REGION"); + System.out.println(" TARGET_ENDPOINT, TARGET_KEY (optional), TARGET_RESOURCE_ID, TARGET_REGION"); + System.out.println(" Skipping cross-resource copy test."); + return; + } + + // Build target client with appropriate authentication + ContentUnderstandingClientBuilder targetBuilder + = new ContentUnderstandingClientBuilder().endpoint(targetEndpoint); + ContentUnderstandingAsyncClient targetAsyncClient; + if (targetKey != null && !targetKey.trim().isEmpty()) { + targetAsyncClient = targetBuilder.credential(new AzureKeyCredential(targetKey)).buildAsyncClient(); + } else { + targetAsyncClient + = targetBuilder.credential(new DefaultAzureCredentialBuilder().build()).buildAsyncClient(); + } + + String sourceAnalyzerId = testResourceNamer.randomName("test_cross_resource_source_", 50); + String targetAnalyzerId = 
testResourceNamer.randomName("test_cross_resource_target_", 50); + + try { + // Step 1: Create source analyzer + ContentAnalyzerConfig config = new ContentAnalyzerConfig(); + config.setEnableLayout(true); + config.setEnableOcr(true); + + Map fields = new HashMap<>(); + ContentFieldDefinition companyNameField = new ContentFieldDefinition(); + companyNameField.setType(ContentFieldType.STRING); + companyNameField.setMethod(GenerationMethod.EXTRACT); + companyNameField.setDescription("Name of the company"); + fields.put("company_name", companyNameField); + + ContentFieldDefinition totalAmountField = new ContentFieldDefinition(); + totalAmountField.setType(ContentFieldType.NUMBER); + totalAmountField.setMethod(GenerationMethod.EXTRACT); + totalAmountField.setDescription("Total amount on the document"); + fields.put("total_amount", totalAmountField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("company_schema"); + fieldSchema.setDescription("Schema for extracting company information"); + fieldSchema.setFields(fields); + + ContentAnalyzer sourceAnalyzer = new ContentAnalyzer(); + sourceAnalyzer.setBaseAnalyzerId("prebuilt-document"); + sourceAnalyzer.setDescription("Source analyzer for cross-resource copying"); + sourceAnalyzer.setConfig(config); + sourceAnalyzer.setFieldSchema(fieldSchema); + + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + sourceAnalyzer.setModels(models); + + PollerFlux createPoller + = contentUnderstandingAsyncClient.beginCreateAnalyzer(sourceAnalyzerId, sourceAnalyzer); + + // Use reactive pattern: chain operations using flatMap + // In a real application, you would use subscribe() instead of block() + ContentAnalyzer sourceResult = createPoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + 
pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Source analyzer '" + sourceAnalyzerId + "' created successfully!"); + + // Step 2: Grant copy authorization using convenience method + CopyAuthorization copyAuth = contentUnderstandingAsyncClient + .grantCopyAuthorization(sourceAnalyzerId, targetResourceId, targetRegion) + .block(); + + assertNotNull(copyAuth, "Copy authorization should not be null"); + System.out.println("Copy authorization granted!"); + System.out.println(" Target Azure Resource ID: " + copyAuth.getTargetAzureResourceId()); + System.out.println(" Expires at: " + copyAuth.getExpiresAt()); + + // Step 3: Copy analyzer to target resource using convenience method + PollerFlux copyPoller = targetAsyncClient + .beginCopyAnalyzer(targetAnalyzerId, sourceAnalyzerId, false, sourceResourceId, sourceRegion); + + // Use reactive pattern for copy operation as well + ContentAnalyzer targetResult = copyPoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Target analyzer '" + targetAnalyzerId + "' copied successfully!"); + System.out.println(" Description: " + targetResult.getDescription()); + + // Verify copied analyzer + ContentAnalyzer copiedAnalyzer = targetAsyncClient.getAnalyzer(targetAnalyzerId).block(); + assertNotNull(copiedAnalyzer, "Copied analyzer should not be null"); + assertEquals(sourceResult.getBaseAnalyzerId(), copiedAnalyzer.getBaseAnalyzerId()); + assertEquals(sourceResult.getDescription(), copiedAnalyzer.getDescription()); + System.out.println("Cross-resource copy verification completed"); + + } finally { + // Cleanup: delete both analyzers + 
try { + contentUnderstandingAsyncClient.deleteAnalyzer(sourceAnalyzerId).block(); + System.out.println("Source analyzer '" + sourceAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup errors + } + + try { + targetAsyncClient.deleteAnalyzer(targetAnalyzerId).block(); + System.out.println("Target analyzer '" + targetAnalyzerId + "' deleted."); + } catch (Exception e) { + // Ignore cleanup errors + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabels.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabels.java new file mode 100644 index 000000000000..1942487d7f93 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabels.java @@ -0,0 +1,254 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.KnowledgeSource; +import com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource; +import com.azure.core.util.polling.SyncPoller; +import org.junit.jupiter.api.Test; + +import com.azure.core.test.TestMode; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Sample demonstrates how to create an analyzer with labeled training data from Azure Blob Storage. + * + * Required environment variables: + * - CONTENTUNDERSTANDING_ENDPOINT: Azure Content Understanding endpoint URL + * - CONTENTUNDERSTANDING_KEY: Azure Content Understanding API key (optional if using DefaultAzureCredential) + * + * Optional environment variables: + * - TRAINING_DATA_SAS_URL: SAS URL for the container with labeled training data + * If set, the analyzer will be created with labeled data knowledge source. + * If not set, the analyzer will be created without training data (demonstration mode). + */ +public class Sample16_CreateAnalyzerWithLabels extends ContentUnderstandingClientTestBase { + + /** + * Demonstrates creating an analyzer with labeled training data. 
+ * + * This test creates an analyzer with field schema. If TRAINING_DATA_SAS_URL is provided, + * labeled training data will be used; otherwise, it demonstrates the API pattern without + * actual training data. + */ + @Test + public void testCreateAnalyzerWithLabels() { + + String analyzerId = testResourceNamer.randomName("test_receipt_analyzer_", 50); + // In PLAYBACK mode, use a placeholder URL to ensure consistent test behavior + String trainingDataSasUrl = getTestMode() == TestMode.PLAYBACK + ? "https://placeholder.blob.core.windows.net/container?sv=placeholder" + : System.getenv("TRAINING_DATA_SAS_URL"); + + try { + // BEGIN: com.azure.ai.contentunderstanding.createAnalyzerWithLabels + // Step 1: Define field schema for receipt extraction + Map fields = new HashMap<>(); + + // MerchantName field + ContentFieldDefinition merchantNameField = new ContentFieldDefinition(); + merchantNameField.setType(ContentFieldType.STRING); + merchantNameField.setMethod(GenerationMethod.EXTRACT); + merchantNameField.setDescription("Name of the merchant"); + fields.put("MerchantName", merchantNameField); + + // Items array field - define item structure + ContentFieldDefinition itemDefinition = new ContentFieldDefinition(); + itemDefinition.setType(ContentFieldType.OBJECT); + itemDefinition.setMethod(GenerationMethod.EXTRACT); + itemDefinition.setDescription("Individual item details"); + + Map itemProperties = new HashMap<>(); + + ContentFieldDefinition quantityField = new ContentFieldDefinition(); + quantityField.setType(ContentFieldType.STRING); + quantityField.setMethod(GenerationMethod.EXTRACT); + quantityField.setDescription("Quantity of the item"); + itemProperties.put("Quantity", quantityField); + + ContentFieldDefinition nameField = new ContentFieldDefinition(); + nameField.setType(ContentFieldType.STRING); + nameField.setMethod(GenerationMethod.EXTRACT); + nameField.setDescription("Name of the item"); + itemProperties.put("Name", nameField); + + ContentFieldDefinition 
priceField = new ContentFieldDefinition(); + priceField.setType(ContentFieldType.STRING); + priceField.setMethod(GenerationMethod.EXTRACT); + priceField.setDescription("Price of the item"); + itemProperties.put("Price", priceField); + + itemDefinition.setProperties(itemProperties); + + // Items array field + ContentFieldDefinition itemsField = new ContentFieldDefinition(); + itemsField.setType(ContentFieldType.ARRAY); + itemsField.setMethod(GenerationMethod.GENERATE); + itemsField.setDescription("List of items purchased"); + itemsField.setItemDefinition(itemDefinition); + fields.put("Items", itemsField); + + // Total field + ContentFieldDefinition totalField = new ContentFieldDefinition(); + totalField.setType(ContentFieldType.STRING); + totalField.setMethod(GenerationMethod.EXTRACT); + totalField.setDescription("Total amount"); + fields.put("Total", totalField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("receipt_schema"); + fieldSchema.setDescription("Schema for receipt extraction with items"); + fieldSchema.setFields(fields); + + // Step 2: Create labeled data knowledge source (optional, based on environment variable) + List knowledgeSources = new ArrayList<>(); + if (trainingDataSasUrl != null && !trainingDataSasUrl.trim().isEmpty()) { + LabeledDataKnowledgeSource knowledgeSource + = new LabeledDataKnowledgeSource().setContainerUrl(trainingDataSasUrl); + knowledgeSources.add(knowledgeSource); + System.out.println("Using labeled training data from: " + + trainingDataSasUrl.substring(0, Math.min(50, trainingDataSasUrl.length())) + "..."); + } else { + System.out.println("No TRAINING_DATA_SAS_URL set, creating analyzer without labeled training data"); + } + + // Step 3: Create analyzer (with or without labeled data) + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new 
ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Receipt analyzer with labeled training data") + .setConfig(new ContentAnalyzerConfig().setEnableLayout(true).setEnableOcr(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + if (!knowledgeSources.isEmpty()) { + analyzer.setKnowledgeSources(knowledgeSources); + } + + SyncPoller createPoller + = contentUnderstandingClient.beginCreateAnalyzer(analyzerId, analyzer); + ContentAnalyzer result = createPoller.getFinalResult(); + + System.out.println("Analyzer created: " + analyzerId); + System.out.println(" Description: " + result.getDescription()); + System.out.println(" Base analyzer: " + result.getBaseAnalyzerId()); + System.out.println(" Fields: " + result.getFieldSchema().getFields().size()); + // END: com.azure.ai.contentunderstanding.createAnalyzerWithLabels + + // BEGIN: Assertion_ContentUnderstandingCreateAnalyzerWithLabels + // Verify analyzer creation + System.out.println("\n📋 Analyzer Creation Verification:"); + assertNotNull(result, "Analyzer should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId()); + assertEquals("Receipt analyzer with labeled training data", result.getDescription()); + assertNotNull(result.getFieldSchema()); + assertEquals("receipt_schema", result.getFieldSchema().getName()); + assertEquals(3, result.getFieldSchema().getFields().size()); + System.out.println("Analyzer created successfully"); + + // Verify field schema + Map resultFields = result.getFieldSchema().getFields(); + assertTrue(resultFields.containsKey("MerchantName"), "Should have MerchantName field"); + assertTrue(resultFields.containsKey("Items"), "Should have Items field"); + assertTrue(resultFields.containsKey("Total"), "Should have Total field"); + + ContentFieldDefinition itemsFieldResult = resultFields.get("Items"); + assertEquals(ContentFieldType.ARRAY, itemsFieldResult.getType()); + assertNotNull(itemsFieldResult.getItemDefinition()); + 
assertEquals(ContentFieldType.OBJECT, itemsFieldResult.getItemDefinition().getType()); + assertEquals(3, itemsFieldResult.getItemDefinition().getProperties().size()); + System.out.println("Field schema verified:"); + System.out.println(" MerchantName: String (Extract)"); + System.out.println(" Items: Array of Objects (Generate)"); + System.out.println(" - Quantity, Name, Price"); + System.out.println(" Total: String (Extract)"); + // END: Assertion_ContentUnderstandingCreateAnalyzerWithLabels + + // If training data was provided, test the analyzer with a sample document + if (trainingDataSasUrl != null && !trainingDataSasUrl.trim().isEmpty()) { + System.out.println("\n📄 Testing analyzer with sample document..."); + String testDocUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(testDocUrl); + + AnalyzeResult analyzeResult + = contentUnderstandingClient.beginAnalyze(analyzerId, Arrays.asList(input)).getFinalResult(); + + System.out.println("Analysis completed!"); + assertNotNull(analyzeResult); + assertNotNull(analyzeResult.getContents()); + assertTrue(analyzeResult.getContents().size() > 0); + + if (analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) analyzeResult.getContents().get(0); + System.out.println("Extracted fields: " + docContent.getFields().size()); + + // Display extracted values + if (docContent.getFields().containsKey("MerchantName")) { + ContentField merchantField = docContent.getFields().get("MerchantName"); + if (merchantField != null) { + String merchantName = (String) merchantField.getValue(); + System.out.println(" MerchantName: " + merchantName); + } + } + if (docContent.getFields().containsKey("Total")) { + ContentField totalFieldValue = docContent.getFields().get("Total"); + if (totalFieldValue != null) { + String total = (String) 
totalFieldValue.getValue(); + System.out.println(" Total: " + total); + } + } + } + } + + // Display API pattern information + System.out.println("\n📚 CreateAnalyzerWithLabels API Pattern:"); + System.out.println(" 1. Define field schema with nested structures (arrays, objects)"); + System.out.println(" 2. Upload training data to Azure Blob Storage:"); + System.out.println(" - Documents: receipt1.pdf, receipt2.pdf, ..."); + System.out.println(" - Labels: receipt1.pdf.labels.json, receipt2.pdf.labels.json, ..."); + System.out.println(" - OCR: receipt1.pdf.result.json, receipt2.pdf.result.json, ..."); + System.out.println(" 3. Create LabeledDataKnowledgeSource with storage SAS URL"); + System.out.println(" 4. Create analyzer with field schema and knowledge sources"); + System.out.println(" 5. Use analyzer for document analysis"); + + System.out.println("\n✅ CreateAnalyzerWithLabels pattern demonstration completed"); + if (trainingDataSasUrl == null || trainingDataSasUrl.trim().isEmpty()) { + System.out.println(" Note: This sample demonstrates the API pattern."); + System.out.println(" For actual training, provide TRAINING_DATA_SAS_URL with labeled data."); + } + + } finally { + // Cleanup + try { + contentUnderstandingClient.deleteAnalyzer(analyzerId); + System.out.println("\nAnalyzer deleted: " + analyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete analyzer: " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabelsAsync.java b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabelsAsync.java new file mode 100644 index 000000000000..f32106af9d34 --- /dev/null +++ 
b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/java/com/azure/ai/contentunderstanding/tests/samples/Sample16_CreateAnalyzerWithLabelsAsync.java @@ -0,0 +1,275 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.contentunderstanding.tests.samples; + +import com.azure.ai.contentunderstanding.models.AnalyzeInput; +import com.azure.ai.contentunderstanding.models.AnalyzeResult; +import com.azure.ai.contentunderstanding.models.ContentAnalyzer; +import com.azure.ai.contentunderstanding.models.ContentAnalyzerConfig; +import com.azure.ai.contentunderstanding.models.ContentField; +import com.azure.ai.contentunderstanding.models.ContentFieldDefinition; +import com.azure.ai.contentunderstanding.models.ContentFieldSchema; +import com.azure.ai.contentunderstanding.models.ContentFieldType; +import com.azure.ai.contentunderstanding.models.DocumentContent; +import com.azure.ai.contentunderstanding.models.GenerationMethod; +import com.azure.ai.contentunderstanding.models.KnowledgeSource; +import com.azure.ai.contentunderstanding.models.LabeledDataKnowledgeSource; +import com.azure.core.util.polling.PollerFlux; +import reactor.core.publisher.Mono; +import org.junit.jupiter.api.Test; + +import com.azure.core.test.TestMode; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Async sample demonstrates how to create an analyzer with labeled training data from Azure Blob Storage. 
+ * + * Required environment variables: + * - CONTENTUNDERSTANDING_ENDPOINT: Azure Content Understanding endpoint URL + * - CONTENTUNDERSTANDING_KEY: Azure Content Understanding API key (optional if using DefaultAzureCredential) + * + * Optional environment variables: + * - TRAINING_DATA_SAS_URL: SAS URL for the container with labeled training data + * If set, the analyzer will be created with labeled data knowledge source. + * If not set, the analyzer will be created without training data (demonstration mode). + */ +public class Sample16_CreateAnalyzerWithLabelsAsync extends ContentUnderstandingClientTestBase { + + /** + * Demonstrates creating an analyzer with labeled training data. + * + * This test creates an analyzer with field schema. If TRAINING_DATA_SAS_URL is provided, + * labeled training data will be used; otherwise, it demonstrates the API pattern without + * actual training data. + */ + @Test + public void testCreateAnalyzerWithLabelsAsync() { + + String analyzerId = testResourceNamer.randomName("test_receipt_analyzer_", 50); + // In PLAYBACK mode, use a placeholder URL to ensure consistent test behavior + String trainingDataSasUrl = getTestMode() == TestMode.PLAYBACK + ? 
"https://placeholder.blob.core.windows.net/container?sv=placeholder" + : System.getenv("TRAINING_DATA_SAS_URL"); + + try { + // BEGIN: com.azure.ai.contentunderstanding.createAnalyzerWithLabelsAsync + // Step 1: Define field schema for receipt extraction + Map fields = new HashMap<>(); + + // MerchantName field + ContentFieldDefinition merchantNameField = new ContentFieldDefinition(); + merchantNameField.setType(ContentFieldType.STRING); + merchantNameField.setMethod(GenerationMethod.EXTRACT); + merchantNameField.setDescription("Name of the merchant"); + fields.put("MerchantName", merchantNameField); + + // Items array field - define item structure + ContentFieldDefinition itemDefinition = new ContentFieldDefinition(); + itemDefinition.setType(ContentFieldType.OBJECT); + itemDefinition.setMethod(GenerationMethod.EXTRACT); + itemDefinition.setDescription("Individual item details"); + + Map itemProperties = new HashMap<>(); + + ContentFieldDefinition quantityField = new ContentFieldDefinition(); + quantityField.setType(ContentFieldType.STRING); + quantityField.setMethod(GenerationMethod.EXTRACT); + quantityField.setDescription("Quantity of the item"); + itemProperties.put("Quantity", quantityField); + + ContentFieldDefinition nameField = new ContentFieldDefinition(); + nameField.setType(ContentFieldType.STRING); + nameField.setMethod(GenerationMethod.EXTRACT); + nameField.setDescription("Name of the item"); + itemProperties.put("Name", nameField); + + ContentFieldDefinition priceField = new ContentFieldDefinition(); + priceField.setType(ContentFieldType.STRING); + priceField.setMethod(GenerationMethod.EXTRACT); + priceField.setDescription("Price of the item"); + itemProperties.put("Price", priceField); + + itemDefinition.setProperties(itemProperties); + + // Items array field + ContentFieldDefinition itemsField = new ContentFieldDefinition(); + itemsField.setType(ContentFieldType.ARRAY); + itemsField.setMethod(GenerationMethod.GENERATE); + 
itemsField.setDescription("List of items purchased"); + itemsField.setItemDefinition(itemDefinition); + fields.put("Items", itemsField); + + // Total field + ContentFieldDefinition totalField = new ContentFieldDefinition(); + totalField.setType(ContentFieldType.STRING); + totalField.setMethod(GenerationMethod.EXTRACT); + totalField.setDescription("Total amount"); + fields.put("Total", totalField); + + ContentFieldSchema fieldSchema = new ContentFieldSchema(); + fieldSchema.setName("receipt_schema"); + fieldSchema.setDescription("Schema for receipt extraction with items"); + fieldSchema.setFields(fields); + + // Step 2: Create labeled data knowledge source (optional, based on environment variable) + List knowledgeSources = new ArrayList<>(); + if (trainingDataSasUrl != null && !trainingDataSasUrl.trim().isEmpty()) { + LabeledDataKnowledgeSource knowledgeSource + = new LabeledDataKnowledgeSource().setContainerUrl(trainingDataSasUrl); + knowledgeSources.add(knowledgeSource); + System.out.println("Using labeled training data from: " + + trainingDataSasUrl.substring(0, Math.min(50, trainingDataSasUrl.length())) + "..."); + } else { + System.out.println("No TRAINING_DATA_SAS_URL set, creating analyzer without labeled training data"); + } + + // Step 3: Create analyzer (with or without labeled data) + Map models = new HashMap<>(); + models.put("completion", "gpt-4.1"); + models.put("embedding", "text-embedding-3-large"); + + ContentAnalyzer analyzer = new ContentAnalyzer().setBaseAnalyzerId("prebuilt-document") + .setDescription("Receipt analyzer with labeled training data") + .setConfig(new ContentAnalyzerConfig().setEnableLayout(true).setEnableOcr(true)) + .setFieldSchema(fieldSchema) + .setModels(models); + + if (!knowledgeSources.isEmpty()) { + analyzer.setKnowledgeSources(knowledgeSources); + } + + PollerFlux createPoller + = contentUnderstandingAsyncClient.beginCreateAnalyzer(analyzerId, analyzer); + + // Use reactive pattern: chain operations using flatMap + // In 
a real application, you would use subscribe() instead of block() + ContentAnalyzer result = createPoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analyzer created: " + analyzerId); + System.out.println(" Description: " + result.getDescription()); + System.out.println(" Base analyzer: " + result.getBaseAnalyzerId()); + System.out.println(" Fields: " + result.getFieldSchema().getFields().size()); + // END: com.azure.ai.contentunderstanding.createAnalyzerWithLabelsAsync + + // BEGIN: Assertion_ContentUnderstandingCreateAnalyzerWithLabelsAsync + // Verify analyzer creation + System.out.println("\n📋 Analyzer Creation Verification:"); + assertNotNull(result, "Analyzer should not be null"); + assertEquals("prebuilt-document", result.getBaseAnalyzerId()); + assertEquals("Receipt analyzer with labeled training data", result.getDescription()); + assertNotNull(result.getFieldSchema()); + assertEquals("receipt_schema", result.getFieldSchema().getName()); + assertEquals(3, result.getFieldSchema().getFields().size()); + System.out.println("Analyzer created successfully"); + + // Verify field schema + Map resultFields = result.getFieldSchema().getFields(); + assertTrue(resultFields.containsKey("MerchantName"), "Should have MerchantName field"); + assertTrue(resultFields.containsKey("Items"), "Should have Items field"); + assertTrue(resultFields.containsKey("Total"), "Should have Total field"); + + ContentFieldDefinition itemsFieldResult = resultFields.get("Items"); + assertEquals(ContentFieldType.ARRAY, itemsFieldResult.getType()); + assertNotNull(itemsFieldResult.getItemDefinition()); + assertEquals(ContentFieldType.OBJECT, 
itemsFieldResult.getItemDefinition().getType()); + assertEquals(3, itemsFieldResult.getItemDefinition().getProperties().size()); + System.out.println("Field schema verified:"); + System.out.println(" MerchantName: String (Extract)"); + System.out.println(" Items: Array of Objects (Generate)"); + System.out.println(" - Quantity, Name, Price"); + System.out.println(" Total: String (Extract)"); + // END: Assertion_ContentUnderstandingCreateAnalyzerWithLabelsAsync + + // If training data was provided, test the analyzer with a sample document + if (trainingDataSasUrl != null && !trainingDataSasUrl.trim().isEmpty()) { + System.out.println("\n📄 Testing analyzer with sample document..."); + String testDocUrl + = "https://github.com/Azure-Samples/cognitive-services-REST-api-samples/raw/master/curl/form-recognizer/sample-invoice.pdf"; + + AnalyzeInput input = new AnalyzeInput(); + input.setUrl(testDocUrl); + + PollerFlux analyzePoller + = contentUnderstandingAsyncClient.beginAnalyze(analyzerId, Arrays.asList(input)); + + // Use reactive pattern for analyze operation + AnalyzeResult analyzeResult = analyzePoller.last().flatMap(pollResponse -> { + if (pollResponse.getStatus().isComplete()) { + return pollResponse.getFinalResult(); + } else { + return Mono.error(new RuntimeException( + "Polling completed unsuccessfully with status: " + pollResponse.getStatus())); + } + }).block(); // block() is used here for testing; in production, use subscribe() + + System.out.println("Analysis completed!"); + assertNotNull(analyzeResult); + assertNotNull(analyzeResult.getContents()); + assertTrue(analyzeResult.getContents().size() > 0); + + if (analyzeResult.getContents().get(0) instanceof DocumentContent) { + DocumentContent docContent = (DocumentContent) analyzeResult.getContents().get(0); + System.out.println("Extracted fields: " + docContent.getFields().size()); + + // Display extracted values + if (docContent.getFields().containsKey("MerchantName")) { + ContentField merchantField = 
docContent.getFields().get("MerchantName"); + if (merchantField != null) { + String merchantName = (String) merchantField.getValue(); + System.out.println(" MerchantName: " + merchantName); + } + } + if (docContent.getFields().containsKey("Total")) { + ContentField totalFieldValue = docContent.getFields().get("Total"); + if (totalFieldValue != null) { + String total = (String) totalFieldValue.getValue(); + System.out.println(" Total: " + total); + } + } + } + } + + // Display API pattern information + System.out.println("\n📚 CreateAnalyzerWithLabels API Pattern:"); + System.out.println(" 1. Define field schema with nested structures (arrays, objects)"); + System.out.println(" 2. Upload training data to Azure Blob Storage:"); + System.out.println(" - Documents: receipt1.pdf, receipt2.pdf, ..."); + System.out.println(" - Labels: receipt1.pdf.labels.json, receipt2.pdf.labels.json, ..."); + System.out.println(" - OCR: receipt1.pdf.result.json, receipt2.pdf.result.json, ..."); + System.out.println(" 3. Create LabeledDataKnowledgeSource with storage SAS URL"); + System.out.println(" 4. Create analyzer with field schema and knowledge sources"); + System.out.println(" 5. 
Use analyzer for document analysis"); + + System.out.println("\n✅ CreateAnalyzerWithLabels pattern demonstration completed"); + if (trainingDataSasUrl == null || trainingDataSasUrl.trim().isEmpty()) { + System.out.println(" Note: This sample demonstrates the API pattern."); + System.out.println(" For actual training, provide TRAINING_DATA_SAS_URL with labeled data."); + } + + } finally { + // Cleanup + try { + contentUnderstandingAsyncClient.deleteAnalyzer(analyzerId).block(); + System.out.println("\nAnalyzer deleted: " + analyzerId); + } catch (Exception e) { + System.out.println("Note: Failed to delete analyzer: " + e.getMessage()); + } + } + } +} diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/mixed_financial_docs.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/mixed_financial_docs.pdf new file mode 100644 index 000000000000..2c6d57818e11 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/mixed_financial_docs.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_document_features.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_document_features.pdf new file mode 100644 index 000000000000..9f47030c0377 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_document_features.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_invoice.pdf b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_invoice.pdf new file mode 100644 index 000000000000..812bcd9b30f3 Binary files /dev/null and b/sdk/contentunderstanding/azure-ai-contentunderstanding/src/test/resources/sample_invoice.pdf differ diff --git a/sdk/contentunderstanding/azure-ai-contentunderstanding/tsp-location.yaml 
b/sdk/contentunderstanding/azure-ai-contentunderstanding/tsp-location.yaml new file mode 100644 index 000000000000..d1779ea4a6b6 --- /dev/null +++ b/sdk/contentunderstanding/azure-ai-contentunderstanding/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/ContentUnderstanding +commit: 5fdd87d51fd8d9f030d7d96ca678aa029877d843 +repo: Azure/azure-rest-api-specs +additionalDirectories: diff --git a/sdk/contentunderstanding/ci.yml b/sdk/contentunderstanding/ci.yml new file mode 100644 index 000000000000..4339919ff607 --- /dev/null +++ b/sdk/contentunderstanding/ci.yml @@ -0,0 +1,46 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. + +trigger: + branches: + include: + - main + - hotfix/* + - release/* + paths: + include: + - sdk/contentunderstanding/ci.yml + - sdk/contentunderstanding/azure-ai-contentunderstanding/ + exclude: + - sdk/contentunderstanding/pom.xml + - sdk/contentunderstanding/azure-ai-contentunderstanding/pom.xml + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/contentunderstanding/ci.yml + - sdk/contentunderstanding/azure-ai-contentunderstanding/ + exclude: + - sdk/contentunderstanding/pom.xml + - sdk/contentunderstanding/azure-ai-contentunderstanding/pom.xml + +parameters: + - name: release_azureaicontentunderstanding + displayName: azure-ai-contentunderstanding + type: boolean + default: true + +extends: + template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml + parameters: + ServiceDirectory: contentunderstanding + Artifacts: + - name: azure-ai-contentunderstanding + groupId: com.azure + safeName: azureaicontentunderstanding + releaseInBatch: ${{ parameters.release_azureaicontentunderstanding }} diff --git a/sdk/contentunderstanding/pom.xml b/sdk/contentunderstanding/pom.xml new file mode 100644 index 000000000000..a381ade59335 --- /dev/null +++ b/sdk/contentunderstanding/pom.xml @@ -0,0 +1,15 @@ + + + 4.0.0 + com.azure + 
azure-contentunderstanding-service + pom + 1.0.0 + + + azure-ai-contentunderstanding + + diff --git a/sdk/contentunderstanding/test-resources-post.ps1 b/sdk/contentunderstanding/test-resources-post.ps1 new file mode 100644 index 000000000000..ef6ea802bc79 --- /dev/null +++ b/sdk/contentunderstanding/test-resources-post.ps1 @@ -0,0 +1,414 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# This script is used to deploy model deployments to the Foundry resources after the main ARM template deployment. +# It is invoked by the New-TestResources.ps1 script after the ARM template is finished being deployed. +# The ARM template creates the Foundry resources, and this script deploys the required models. +# After model deployments are complete, it calls the Content Understanding UpdateDefaults API to configure +# the default model deployment mappings. + +param ( + [hashtable] $DeploymentOutputs, + [string] $ResourceGroupName +) + +# Get resource IDs from deployment outputs +$primaryResourceId = $DeploymentOutputs['CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID'] +$copyTargetResourceId = $DeploymentOutputs['CONTENTUNDERSTANDING_TARGET_RESOURCE_ID'] + +if (-not $primaryResourceId) { + Write-Error "CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID (Primary Microsoft Foundry resource ID) not found in deployment outputs" + exit 1 +} + +if (-not $copyTargetResourceId) { + Write-Error "CONTENTUNDERSTANDING_TARGET_RESOURCE_ID (Copy target Microsoft Foundry resource ID) not found in deployment outputs" + exit 1 +} + +# Extract account names from resource IDs +# Format: /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.CognitiveServices/accounts/{accountName} +$primaryAccountName = $primaryResourceId -replace '^.*/accounts/', '' +$copyTargetAccountName = $copyTargetResourceId -replace '^.*/accounts/', '' + +# Get endpoints from deployment outputs +$primaryEndpoint = $DeploymentOutputs['CONTENTUNDERSTANDING_ENDPOINT'] +$copyTargetEndpoint 
= $DeploymentOutputs['CONTENTUNDERSTANDING_TARGET_ENDPOINT'] + +if (-not $primaryEndpoint) { + Write-Error "CONTENTUNDERSTANDING_ENDPOINT (Primary Microsoft Foundry endpoint) not found in deployment outputs" + exit 1 +} + +if (-not $copyTargetEndpoint) { + Write-Error "CONTENTUNDERSTANDING_TARGET_ENDPOINT (Copy target Microsoft Foundry endpoint) not found in deployment outputs" + exit 1 +} + +Write-Host "Deploying models to Primary Microsoft Foundry resource: $primaryAccountName" +Write-Host "Deploying models to copy target Foundry resource: $copyTargetAccountName" + +# Model deployment configurations +$modelConfigs = @( + @{ + Name = 'gpt-4.1' + ModelName = 'gpt-4.1' + Format = 'OpenAI' + Version = '2025-04-14' + SkuName = 'Standard' + SkuCapacity = 150 # Rate limit: 150,000 tokens per minute + }, + @{ + Name = 'gpt-4.1-mini' + ModelName = 'gpt-4.1-mini' + Format = 'OpenAI' + Version = '2025-04-14' + SkuName = 'Standard' + SkuCapacity = 150 # Rate limit: 150,000 tokens per minute + }, + @{ + Name = 'text-embedding-3-large' + ModelName = 'text-embedding-3-large' + Format = 'OpenAI' + Version = '1' + SkuName = 'GlobalStandard' + SkuCapacity = 100 # Rate limit: 100,000 tokens per minute + } +) + +# Function to deploy a model using Azure CLI +# Returns $true if successful, $false if failed +function Deploy-Model { + param ( + [string] $ResourceGroupName, + [string] $AccountName, + [string] $DeploymentName, + [string] $ModelName, + [string] $ModelFormat, + [string] $ModelVersion, + [string] $SkuName, + [int] $SkuCapacity + ) + + Write-Host "Deploying model '$ModelName' as deployment '$DeploymentName' to account '$AccountName'..." + + try { + # Check if deployment already exists + $null = az cognitiveservices account deployment show ` + --resource-group $ResourceGroupName ` + --name $AccountName ` + --deployment-name $DeploymentName ` + 2>&1 + + if ($LASTEXITCODE -eq 0) { + Write-Host "Deployment '$DeploymentName' already exists, skipping..." 
+ return $true + } + + # Build Azure CLI command arguments + $azArgs = @( + 'cognitiveservices', 'account', 'deployment', 'create', + '--resource-group', $ResourceGroupName, + '--name', $AccountName, + '--deployment-name', $DeploymentName, + '--model-format', $ModelFormat, + '--model-name', $ModelName, + '--model-version', $ModelVersion, + '--output', 'json' + ) + + # Add SKU parameters only if specified + if ($SkuName) { + $azArgs += '--sku-name', $SkuName + } + if ($SkuCapacity -gt 0) { + $azArgs += '--sku-capacity', $SkuCapacity.ToString() + } + + # Create deployment using Azure CLI + # Note: Azure CLI requires individual parameters, not a JSON body + # Note: --rai-policy-name and --version-upgrade-option are not supported in current Azure CLI version + $deploymentJson = & az $azArgs 2>&1 + + if ($LASTEXITCODE -eq 0) { + $deployment = $deploymentJson | ConvertFrom-Json + Write-Host "Successfully deployed '$DeploymentName' (Status: $($deployment.properties.provisioningState))" -ForegroundColor Green + return $true + } + else { + Write-Error "FAILED to deploy '$DeploymentName': $deploymentJson" -ErrorAction Continue + return $false + } + } + catch { + Write-Error "FAILED to deploy '$DeploymentName': $_" -ErrorAction Continue + return $false + } +} + +# Function to wait for a deployment to be ready (provisioning state = Succeeded) +# Returns $true if deployment is ready, $false if timeout or failed +function Wait-ForDeployment { + param ( + [string] $ResourceGroupName, + [string] $AccountName, + [string] $DeploymentName, + [int] $MaxWaitMinutes = 15, + [int] $PollIntervalSeconds = 30 + ) + + Write-Host "Waiting for deployment '$DeploymentName' to be ready..." 
+    $startTime = Get-Date
+    $maxWaitTime = $startTime.AddMinutes($MaxWaitMinutes)
+
+    # Poll ARM until the deployment reaches a terminal state or the deadline passes.
+    while ((Get-Date) -lt $maxWaitTime) {
+        try {
+            $deploymentJson = az cognitiveservices account deployment show `
+                --resource-group $ResourceGroupName `
+                --name $AccountName `
+                --deployment-name $DeploymentName `
+                --output json 2>&1
+
+            if ($LASTEXITCODE -eq 0) {
+                $deployment = $deploymentJson | ConvertFrom-Json
+                $provisioningState = $deployment.properties.provisioningState
+
+                if ($provisioningState -eq 'Succeeded') {
+                    Write-Host "Deployment '$DeploymentName' is ready (Status: $provisioningState)" -ForegroundColor Green
+                    return $true
+                }
+                elseif ($provisioningState -eq 'Failed') {
+                    # Terminal failure state - no point continuing to poll.
+                    Write-Error "Deployment '$DeploymentName' failed" -ErrorAction Continue
+                    return $false
+                }
+                else {
+                    # Still in a transient state (Creating/Accepted/etc.) - keep polling.
+                    Write-Host "Deployment '$DeploymentName' status: $provisioningState (waiting...)"
+                }
+            }
+            else {
+                # Transient CLI/ARM error - retry on the next poll interval.
+                Write-Host "Could not check deployment status, will retry..."
+            }
+        }
+        catch {
+            Write-Host "Error checking deployment status: $_, will retry..."
+        }
+
+        Start-Sleep -Seconds $PollIntervalSeconds
+    }
+
+    Write-Warning "Timeout waiting for deployment '$DeploymentName' to be ready after $MaxWaitMinutes minutes"
+    return $false
+}
+
+# Function to call Content Understanding UpdateDefaults API using az rest
+# Returns $true if successful, $false if failed
+# Retries on DeploymentIdNotFound errors to handle propagation delay between
+# ARM reporting a deployment as 'Succeeded' and the data-plane API seeing it.
+function Update-ContentUnderstandingDefaults {
+    param (
+        [string] $Endpoint,
+        [string] $AccountName,
+        [hashtable] $ModelDeployments,
+        [int] $MaxRetries = 10,
+        [int] $RetryDelaySeconds = 30
+    )
+
+    Write-Host "Updating Content Understanding defaults for account '$AccountName'..."
+
+    # Build the request body JSON
+    # Format: { "modelDeployments": { "gpt-4.1": "gpt-4.1", "gpt-4.1-mini": "gpt-4.1-mini", "text-embedding-3-large": "text-embedding-3-large" } }
+    $modelDeploymentsJson = @{}
+    foreach ($kvp in $ModelDeployments.GetEnumerator()) {
+        $modelDeploymentsJson[$kvp.Key] = $kvp.Value
+    }
+    $requestBody = @{
+        modelDeployments = $modelDeploymentsJson
+    } | ConvertTo-Json -Depth 10 -Compress
+
+    # Call UpdateDefaults API using az rest
+    # Endpoint: {endpoint}/contentunderstanding/defaults?api-version=2025-11-01
+    # Method: PATCH
+    # Content-Type: application/merge-patch+json
+    # Note: az rest will automatically determine the resource from the URL for known endpoints
+    $apiUrl = "$($Endpoint.TrimEnd('/'))/contentunderstanding/defaults?api-version=2025-11-01"
+
+    # Use the Cognitive Services resource URL for authentication
+    # For Azure Cognitive Services, the resource identifier is https://cognitiveservices.azure.com
+    $resourceUrl = "https://cognitiveservices.azure.com"
+
+    $attempt = 0
+    while ($attempt -lt $MaxRetries) {
+        $attempt++
+
+        if ($attempt -gt 1) {
+            Write-Host "Retry attempt $attempt of $MaxRetries (waiting $RetryDelaySeconds seconds for deployment propagation)..."
+            Start-Sleep -Seconds $RetryDelaySeconds
+        }
+        else {
+            Write-Host "Calling UpdateDefaults API: $apiUrl"
+            Write-Host "Request body: $requestBody"
+        }
+
+        try {
+            $response = az rest --method patch `
+                --url $apiUrl `
+                --resource $resourceUrl `
+                --headers "Content-Type=application/merge-patch+json" `
+                --body $requestBody `
+                --output json 2>&1
+
+            if ($LASTEXITCODE -eq 0) {
+                # Exit code 0 means the PATCH succeeded; report success unconditionally.
+                Write-Host "Successfully updated Content Understanding defaults for '$AccountName'" -ForegroundColor Green
+                # Echoing the configured deployments is best-effort only. Because
+                # stderr is merged into $response via 2>&1, a benign CLI warning can
+                # make the captured text non-JSON even on success; previously that
+                # parse failure escaped to the outer catch and the call was wrongly
+                # reported as FAILED. Parse defensively instead.
+                try {
+                    $result = $response | ConvertFrom-Json
+                    if ($result.modelDeployments) {
+                        Write-Host "Configured model deployments:"
+                        foreach ($kvp in $result.modelDeployments.PSObject.Properties) {
+                            Write-Host "  $($kvp.Name): $($kvp.Value)"
+                        }
+                    }
+                }
+                catch {
+                    Write-Host "Response was not valid JSON; skipping deployment echo."
+                }
+                return $true
+            }
+            else {
+                # Check if the error is DeploymentIdNotFound (propagation delay)
+                $errorMessage = $response -join " "
+                if ($errorMessage -match "DeploymentIdNotFound") {
+                    if ($attempt -lt $MaxRetries) {
+                        Write-Host "Deployment not yet visible to Content Understanding API (attempt $attempt/$MaxRetries). This is normal due to propagation delay." -ForegroundColor Yellow
+                        continue
+                    }
+                    else {
+                        Write-Error "FAILED to update Content Understanding defaults for '$AccountName' after $MaxRetries attempts: Deployment still not visible to API after waiting. $errorMessage" -ErrorAction Continue
+                        return $false
+                    }
+                }
+                else {
+                    # Non-propagation error - don't retry
+                    Write-Error "FAILED to update Content Understanding defaults for '$AccountName': $errorMessage" -ErrorAction Continue
+                    return $false
+                }
+            }
+        }
+        catch {
+            Write-Error "FAILED to update Content Understanding defaults for '$AccountName': $_" -ErrorAction Continue
+            return $false
+        }
+    }
+
+    return $false
+}
+
+# Deploy models to Primary Microsoft Foundry resource
+foreach ($model in $modelConfigs) {
+    $result = Deploy-Model `
+        -ResourceGroupName $ResourceGroupName `
+        -AccountName $primaryAccountName `
+        -DeploymentName $model.Name `
+        -ModelName $model.ModelName `
+        -ModelFormat $model.Format `
+        -ModelVersion $model.Version `
+        -SkuName $model.SkuName `
+        -SkuCapacity $model.SkuCapacity
+    if (-not $result) {
+        Write-Error "Failed to deploy '$($model.Name)' to Primary Microsoft Foundry resource. Exiting." -ErrorAction Stop
+        exit 1
+    }
+}
+
+# Deploy models to copy target resource
+foreach ($model in $modelConfigs) {
+    $result = Deploy-Model `
+        -ResourceGroupName $ResourceGroupName `
+        -AccountName $copyTargetAccountName `
+        -DeploymentName $model.Name `
+        -ModelName $model.ModelName `
+        -ModelFormat $model.Format `
+        -ModelVersion $model.Version `
+        -SkuName $model.SkuName `
+        -SkuCapacity $model.SkuCapacity
+    if (-not $result) {
+        Write-Error "Failed to deploy '$($model.Name)' to copy target resource. Exiting." -ErrorAction Stop
+        exit 1
+    }
+}
+
+Write-Host ""
+Write-Host "Model deployment script completed successfully." -ForegroundColor Green
+Write-Host ""
+Write-Host "IMPORTANT: Model deployments may take 5-15 minutes to propagate to the Content Understanding API." -ForegroundColor Yellow
+Write-Host "Even though deployments show 'Succeeded' in Azure Resource Manager, the Content Understanding" -ForegroundColor Yellow
+Write-Host "API may not see them immediately. If tests fail with 'DeploymentIdNotFound', wait a few" -ForegroundColor Yellow
+Write-Host "more minutes and retry the tests." -ForegroundColor Yellow
+
+# Wait for deployments to be ready before calling UpdateDefaults
+Write-Host ""
+Write-Host "Waiting for model deployments to be ready before updating Content Understanding defaults..." -ForegroundColor Cyan
+
+$allDeploymentsReady = $true
+
+# Wait for Primary Microsoft Foundry resource deployments
+Write-Host "Checking Primary Microsoft Foundry resource deployments..."
+foreach ($model in $modelConfigs) {
+    $isReady = Wait-ForDeployment `
+        -ResourceGroupName $ResourceGroupName `
+        -AccountName $primaryAccountName `
+        -DeploymentName $model.Name `
+        -MaxWaitMinutes 15 `
+        -PollIntervalSeconds 30
+    if (-not $isReady) {
+        $allDeploymentsReady = $false
+    }
+}
+
+# Wait for copy target resource deployments
+Write-Host "Checking copy target resource deployments..."
+foreach ($model in $modelConfigs) {
+    $isReady = Wait-ForDeployment `
+        -ResourceGroupName $ResourceGroupName `
+        -AccountName $copyTargetAccountName `
+        -DeploymentName $model.Name `
+        -MaxWaitMinutes 15 `
+        -PollIntervalSeconds 30
+    if (-not $isReady) {
+        $allDeploymentsReady = $false
+    }
+}
+
+if ($allDeploymentsReady) {
+    Write-Host ""
+    Write-Host "All deployments are ready. Updating Content Understanding defaults..." -ForegroundColor Cyan
+
+    # Build model deployments mapping (model name -> deployment name)
+    # The deployment name is the same as the model name in our configuration
+    $modelDeployments = @{}
+    foreach ($model in $modelConfigs) {
+        if ($null -ne $model.Name -and -not [string]::IsNullOrWhiteSpace([string]$model.Name)) {
+            $modelDeployments[$model.Name] = $model.Name
+        }
+    }
+
+    # Update defaults for Primary Microsoft Foundry resource
+    $updatePrimaryResult = Update-ContentUnderstandingDefaults `
+        -Endpoint $primaryEndpoint `
+        -AccountName $primaryAccountName `
+        -ModelDeployments $modelDeployments
+
+    # Update defaults for copy target resource
+    $updateCopyTargetResult = Update-ContentUnderstandingDefaults `
+        -Endpoint $copyTargetEndpoint `
+        -AccountName $copyTargetAccountName `
+        -ModelDeployments $modelDeployments
+
+    if ($updatePrimaryResult -and $updateCopyTargetResult) {
+        Write-Host ""
+        Write-Host "Content Understanding defaults updated successfully for both resources!" -ForegroundColor Green
+    }
+    else {
+        Write-Host ""
+        Write-Warning "Some UpdateDefaults calls may have failed. Check the error messages above."
+    }
+}
+else {
+    Write-Host ""
+    Write-Warning "Not all deployments are ready. Skipping UpdateDefaults API call."
+    Write-Warning "You may need to manually call UpdateDefaults after deployments are ready."
+}
+
diff --git a/sdk/contentunderstanding/test-resources.bicep b/sdk/contentunderstanding/test-resources.bicep
new file mode 100644
index 000000000000..18b3aaf75c32
--- /dev/null
+++ b/sdk/contentunderstanding/test-resources.bicep
+// ============================================================================
+// Azure Content Understanding SDK Test Resources
+// ============================================================================
+// This Bicep template creates the following Azure resources for testing:
+//
+// Resources Created:
+// 1. Primary Microsoft Foundry resource (Microsoft.CognitiveServices/accounts)
+//    - Primary resource for testing Content Understanding functionality
+//    - Kind: AIServices, SKU: S0
+// 2. Secondary Microsoft Foundry resource (Microsoft.CognitiveServices/accounts)
+//    - Used as target for cross-resource copying operations (e.g., Sample15)
+//    - Kind: AIServices, SKU: S0
+// 3. Role assignments (Microsoft.Authorization/roleAssignments)
+//    - Grants test application/service principal "Cognitive Services User" role
+//    - One for primary resource, one for secondary resource
+// 4. Model deployments (Microsoft.CognitiveServices/accounts/deployments)
+//    - Deployed via test-resources-post.ps1 script after resource creation
+//    - Creates deployments for: gpt-4.1, gpt-4.1-mini, text-embedding-3-large
+//
+// Environment Variables Generated (outputs):
+//   Primary Resource:
+//   - CONTENTUNDERSTANDING_ENDPOINT: Primary Foundry API endpoint
+//
+//   Primary Resource (used as source for cross-resource copy):
+//   - CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID: Primary Foundry resource ID
+//   - CONTENTUNDERSTANDING_SOURCE_REGION: Primary resource region
+//
+//   Secondary Resource (used as target for cross-resource copy):
+//   - CONTENTUNDERSTANDING_TARGET_ENDPOINT: Secondary Foundry API endpoint
+//   - CONTENTUNDERSTANDING_TARGET_RESOURCE_ID: Secondary Foundry resource ID
+//   - CONTENTUNDERSTANDING_TARGET_REGION: Secondary resource region
+//
+//   Model Deployment Names:
+//   - GPT_4_1_DEPLOYMENT: Deployment name for gpt-4.1 model
+//   - GPT_4_1_MINI_DEPLOYMENT: Deployment name for gpt-4.1-mini model
+//   - TEXT_EMBEDDING_3_LARGE_DEPLOYMENT: Deployment name for text-embedding-3-large model
+//
+// Authentication:
+//   - Uses DefaultAzureCredential (no API keys needed)
+//   - Role assignments grant access via "Cognitive Services User" role
+// ============================================================================
+
+@description('The client OID to grant access to test resources.')
+param testApplicationOid string
+
+@minLength(6)
+@maxLength(50)
+@description('The base resource name.')
+param baseName string = resourceGroup().name
+
+@description('The location of the resource. By default, this is the same as the resource group.')
+param location string = resourceGroup().location
+
+// Role definition ID for "Cognitive Services User" role
+var cognitiveServicesUserRoleId = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908')
+
+// Resource names
+var testFoundryName = '${baseName}-foundry'
+var targetFoundryName = '${baseName}-copy-target'
+
+// Source Microsoft Foundry resource (primary resource for most tests)
+resource sourceFoundry 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = {
+  name: testFoundryName
+  location: location
+  kind: 'AIServices'
+  sku: {
+    name: 'S0'
+  }
+  properties: {
+    customSubDomainName: toLower(testFoundryName)
+    publicNetworkAccess: 'Enabled'
+  }
+}
+
+// Role assignment for source resource - grants test application access
+// Note: principalType is omitted to allow Azure to infer it automatically (works for both User and ServicePrincipal)
+resource sourceRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
+  name: guid(resourceGroup().id, sourceFoundry.id, cognitiveServicesUserRoleId)
+  scope: sourceFoundry
+  properties: {
+    roleDefinitionId: cognitiveServicesUserRoleId
+    principalId: testApplicationOid
+  }
+}
+
+// Target Microsoft Foundry resource (for cross-resource copy tests, e.g., Sample15)
+resource targetFoundry 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = {
+  name: targetFoundryName
+  location: location
+  kind: 'AIServices'
+  sku: {
+    name: 'S0'
+  }
+  properties: {
+    customSubDomainName: toLower(targetFoundryName)
+    publicNetworkAccess: 'Enabled'
+  }
+}
+
+// Role assignment for target resource - grants test application access
+// Note: principalType is omitted to allow Azure to infer it automatically (works for both User and ServicePrincipal)
+resource targetRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
+  name: guid(resourceGroup().id, targetFoundry.id, cognitiveServicesUserRoleId)
+  scope: targetFoundry
+  properties: {
+    roleDefinitionId: cognitiveServicesUserRoleId
+    principalId: testApplicationOid
+  }
+}
+
+// Model deployments are handled by test-resources-post.ps1 script after resource creation.
+// This allows models to be deployed even if they're not available during initial Bicep deployment.
+// Deployments can take 5-15 minutes to complete.
+
+// Outputs - these become environment variables for tests
+// Variable names match what ContentUnderstandingClientTestEnvironment expects
+// Note: We use DefaultAzureCredential for authentication, so API keys are not needed
+// Role assignments grant the test application/service principal access via the 'Cognitive Services User' role
+// Construct endpoint from variable (endpoint format: https://{customSubDomainName}.services.ai.azure.com/)
+// Using toLower(testFoundryName) which matches the customSubDomainName set in the resource
+output CONTENTUNDERSTANDING_ENDPOINT string = 'https://${toLower(testFoundryName)}.services.ai.azure.com/'
+
+// Primary resource outputs (used as source for cross-resource copy)
+output CONTENTUNDERSTANDING_SOURCE_RESOURCE_ID string = sourceFoundry.id
+output CONTENTUNDERSTANDING_SOURCE_REGION string = location
+
+// Target resource outputs (for cross-resource copy)
+// Construct endpoint from variable (endpoint format: https://{customSubDomainName}.services.ai.azure.com/)
+// Using toLower(targetFoundryName) which matches the customSubDomainName set in the resource
+output CONTENTUNDERSTANDING_TARGET_ENDPOINT string = 'https://${toLower(targetFoundryName)}.services.ai.azure.com/'
+output CONTENTUNDERSTANDING_TARGET_RESOURCE_ID string = targetFoundry.id
+output CONTENTUNDERSTANDING_TARGET_REGION string = location
+
+// Model deployment outputs - deployment names for tests
+// These match what ContentUnderstandingClientTestEnvironment expects
+output GPT_4_1_DEPLOYMENT string = 'gpt-4.1'
+output GPT_4_1_MINI_DEPLOYMENT string = 'gpt-4.1-mini'
+output TEXT_EMBEDDING_3_LARGE_DEPLOYMENT string = 'text-embedding-3-large'
+
diff --git a/sdk/contentunderstanding/tests.yml b/sdk/contentunderstanding/tests.yml
new file mode 100644
index 000000000000..47d43f5b65da
--- /dev/null
+++ b/sdk/contentunderstanding/tests.yml
+trigger: none
+
+extends:
+  template: /eng/pipelines/templates/stages/archetype-sdk-tests.yml
+  parameters:
+    ServiceDirectory: contentunderstanding
+    timeoutInMinutes: 150 # how long to run the job before automatically cancelling
+    Artifacts:
+      - name: azure-ai-contentunderstanding
+        groupId: com.azure
+        safeName: azureaicontentunderstanding
+    CloudConfig:
+      Public:
+        Location: 'eastus'