From 3794e93d9b582f5a5f63aba88259fb4efa6f8048 Mon Sep 17 00:00:00 2001 From: Ruben van der Linde Date: Thu, 18 Sep 2025 11:28:06 +0200 Subject: [PATCH 1/2] feat: add AI functionality with LLPhant integration - Add LLPhant dependency (theodo-group/llphant:^0.11.4) for AI capabilities - Add text and embedding fields to ObjectEntity for AI-generated content - Create database migration for AI fields (Version1Date20250918120000) - Add comprehensive AI configuration UI with provider selection - Support multiple AI providers: OpenAI, Ollama, Azure, Anthropic - Add AI settings store with connection testing and validation - Create detailed AI functionality documentation - Enable automatic text generation and vector embeddings for objects This implements the foundation for semantic search and AI-powered features using the LLPhant framework with support for various embedding models and language models from different providers. --- composer.json | 1 + lib/Db/ObjectEntity.php | 54 ++ lib/Migration/Version1Date20250918120000.php | 118 ++++ src/store/settings.js | 144 ++++ src/views/settings/Settings.vue | 5 + .../settings/sections/AiConfiguration.vue | 668 ++++++++++++++++++ website/docs/Features/ai-functionality.md | 311 ++++++++ website/docs/Features/index.md | 57 +- 8 files changed, 1357 insertions(+), 1 deletion(-) create mode 100644 lib/Migration/Version1Date20250918120000.php create mode 100644 src/views/settings/sections/AiConfiguration.vue create mode 100644 website/docs/Features/ai-functionality.md diff --git a/composer.json b/composer.json index 575b8e7f4..bbea946cc 100644 --- a/composer.json +++ b/composer.json @@ -75,6 +75,7 @@ "bamarni/composer-bin-plugin": "^1.8", "elasticsearch/elasticsearch": "^v8.14.0", "guzzlehttp/guzzle": "^7.0", + "theodo-group/llphant": "^0.11.4", "opis/json-schema": "^2.3", "phpoffice/phpspreadsheet": "^4.2", "react/event-loop": "^1.5", diff --git a/lib/Db/ObjectEntity.php b/lib/Db/ObjectEntity.php index 73001aeeb..6ce7993df 100644 --- a/lib/Db/ObjectEntity.php +++ b/lib/Db/ObjectEntity.php @@ -340,6 +340,31 @@ class ObjectEntity extends Entity implements JsonSerializable */ protected ?DateTime $expires = null; + /** + * AI-generated text representation of the object for search and analysis + * + * This field stores a searchable text representation of the object's content, + * which can be used for full-text search, AI analysis, and content processing. + * The text is automatically generated from the object's data during save operations. + * + * @var string|null AI-generated text representation of the object + */ + protected ?string $text = null; + + /** + * AI-generated vector embedding of the object for semantic search + * + * This field stores a vector representation of the object's content for semantic + * similarity searches, AI-powered recommendations, and advanced analytics. + * The embedding is automatically generated from the object's text representation. + * + * Format: JSON array of floating-point numbers representing the vector dimensions + * Example: [0.1234, -0.5678, 0.9012, ...] 
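+     * Note: the number of dimensions depends on the configured embedding model
+     * (for example 1536 for text-embedding-ada-002 or 768 for nomic-embed-text).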
+ * + * @var array|null AI-generated vector embedding as JSON array + */ + protected ?array $embedding = null; + /** * Initialize the entity and define field types @@ -377,6 +402,8 @@ public function __construct() $this->addType(fieldName: 'depublished', type: 'datetime'); $this->addType(fieldName: 'groups', type: 'json'); $this->addType(fieldName: 'expires', type: 'datetime'); + $this->addType(fieldName: 'text', type: 'string'); + $this->addType(fieldName: 'embedding', type: 'json'); }//end __construct() @@ -471,6 +498,30 @@ public function getValidation(): ?array }//end getValidation() + /** + * Get the AI-generated text representation of the object + * + * @return string|null The text representation or null if not generated + */ + public function getText(): ?string + { + return $this->text; + + }//end getText() + + + /** + * Get the AI-generated vector embedding of the object + * + * @return array|null The vector embedding as JSON array or null if not generated + */ + public function getEmbedding(): ?array + { + return $this->embedding; + + }//end getEmbedding() + + /** * Get array of field names that are JSON type * @@ -588,6 +639,9 @@ public function getObjectArray(array $object=[]): array 'published' => $this->getFormattedDate($this->published), 'depublished' => $this->getFormattedDate($this->depublished), 'deleted' => $this->deleted, + 'text' => $this->text, + 'embedding' => $this->embedding, + 'expires' => $this->getFormattedDate($this->expires), ]; // Check for '@self' in the provided object array (this is the case if the object metadata is extended). diff --git a/lib/Migration/Version1Date20250918120000.php b/lib/Migration/Version1Date20250918120000.php new file mode 100644 index 000000000..f7d277158 --- /dev/null +++ b/lib/Migration/Version1Date20250918120000.php @@ -0,0 +1,118 @@ + + * @copyright 2024 Conduction B.V. 
+ * @license EUPL-1.2 https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12 + * + * @version GIT: + * + * @link https://www.OpenRegister.nl + */ + +namespace OCA\OpenRegister\Migration; + +use Closure; +use OCP\DB\ISchemaWrapper; +use OCP\DB\Types; +use OCP\Migration\IOutput; +use OCP\Migration\SimpleMigrationStep; + +/** + * Migration to add AI functionality fields to objects table + * + * This migration implements AI enhancements including: + * - Text field for searchable text representation of objects + * - Embedding field for vector embeddings used in semantic search + * - Support for AI-powered content analysis and recommendations + */ +class Version1Date20250918120000 extends SimpleMigrationStep +{ + + /** + * Add AI functionality fields to objects table + * + * @param IOutput $output Migration output interface + * @param Closure $schemaClosure Schema closure + * @param array $options Migration options + * + * @return ISchemaWrapper|null Updated schema + */ + public function changeSchema(IOutput $output, Closure $schemaClosure, array $options): ?ISchemaWrapper + { + /** @var ISchemaWrapper $schema */ + $schema = $schemaClosure(); + + // Get the objects table to add AI fields + if ($schema->hasTable('openregister_objects')) { + $table = $schema->getTable('openregister_objects'); + + $output->info('🤖 Adding AI functionality fields to objects table...'); + + // Add text field for AI-generated text representation + if (!$table->hasColumn('text')) { + $table->addColumn('text', Types::TEXT, [ + 'notnull' => false, + 'length' => 65535, // TEXT field can hold up to 65KB + 'comment' => 'AI-generated text representation for search and analysis' + ]); + $output->info('✅ Added text field for AI-generated content representation'); + } else { + $output->info('â„šī¸ Text field already exists'); + } + + // Add embedding field for vector embeddings + if (!$table->hasColumn('embedding')) { + $table->addColumn('embedding', Types::JSON, [ + 'notnull' => false, + 'comment' => 'AI-generated vector embedding as JSON array for semantic search' + ]); + $output->info('✅ Added embedding field for vector representations'); + } else { + $output->info('â„šī¸ Embedding field already exists'); + } + + $output->info('đŸŽ¯ AI fields will enable semantic search and content analysis'); + + } else { + $output->info('âš ī¸ openregister_objects table not found'); + } + + $output->info('🎉 AI enhancement migration completed'); + + return $schema; + } + + /** + * Post schema update operations + * + * @param IOutput $output Migration output interface + * @param Closure $schemaClosure Schema closure + * @param array $options Migration options + * + * @return void + */ + public function postSchemaChange(IOutput $output, Closure $schemaClosure, array $options): void + { + $output->info('📋 Post-migration verification...'); + $output->info('✅ Text field ready for AI-generated content representation'); + $output->info('✅ Embedding field ready for vector-based semantic search'); + $output->info('✅ Objects can now be enhanced with AI-powered features'); + $output->info('🤖 AI functionality infrastructure successfully deployed'); + } + +}//end class diff --git a/src/store/settings.js b/src/store/settings.js index bd01220b7..d87747a09 100644 --- a/src/store/settings.js +++ b/src/store/settings.js @@ -95,6 +95,24 @@ export const useSettingsStore = defineStore('settings', { deleteLogRetention: 2592000000, // 1 month }, + aiOptions: { + enabled: false, + provider: 'openai', // openai, ollama, azure, anthropic + apiKey: '', + 
baseUrl: '', + model: 'gpt-3.5-turbo', + embeddingModel: 'text-embedding-ada-002', + maxTokens: 4000, + temperature: 0.7, + timeout: 30, + enableAutoTextGeneration: true, + enableAutoEmbedding: true, + embeddingDimensions: 1536, + batchSize: 100, + retryAttempts: 3, + enableLogging: true, + }, + versionInfo: { appName: 'Open Register', appVersion: '0.2.3', @@ -109,6 +127,63 @@ export const useSettingsStore = defineStore('settings', { { id: 'https', label: 'HTTPS' }, ], + // AI provider options based on LLPhant support + aiProviderOptions: [ + { id: 'openai', label: 'OpenAI', description: 'OpenAI GPT models and embeddings' }, + { id: 'ollama', label: 'Ollama', description: 'Local Ollama models' }, + { id: 'azure', label: 'Azure OpenAI', description: 'Microsoft Azure OpenAI Service' }, + { id: 'anthropic', label: 'Anthropic', description: 'Anthropic Claude models' }, + ], + + // Embedding model options supported by LLPhant + embeddingModelOptions: { + openai: [ + { id: 'text-embedding-ada-002', label: 'Ada v2 (1536 dims)', dimensions: 1536 }, + { id: 'text-embedding-3-small', label: 'Embedding v3 Small (1536 dims)', dimensions: 1536 }, + { id: 'text-embedding-3-large', label: 'Embedding v3 Large (3072 dims)', dimensions: 3072 }, + ], + ollama: [ + { id: 'nomic-embed-text', label: 'Nomic Embed Text (768 dims)', dimensions: 768 }, + { id: 'mxbai-embed-large', label: 'MxBai Embed Large (1024 dims)', dimensions: 1024 }, + { id: 'all-minilm', label: 'All MiniLM (384 dims)', dimensions: 384 }, + ], + azure: [ + { id: 'text-embedding-ada-002', label: 'Ada v2 (1536 dims)', dimensions: 1536 }, + { id: 'text-embedding-3-small', label: 'Embedding v3 Small (1536 dims)', dimensions: 1536 }, + { id: 'text-embedding-3-large', label: 'Embedding v3 Large (3072 dims)', dimensions: 3072 }, + ], + anthropic: [ + // Anthropic doesn't provide embedding models directly, typically uses OpenAI + { id: 'text-embedding-ada-002', label: 'OpenAI Ada v2 (1536 dims)', dimensions: 1536 }, + ], + }, + + // Language model options + languageModelOptions: { + openai: [ + { id: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', description: 'Fast and efficient' }, + { id: 'gpt-4', label: 'GPT-4', description: 'Most capable model' }, + { id: 'gpt-4-turbo', label: 'GPT-4 Turbo', description: 'Faster GPT-4 variant' }, + { id: 'gpt-4o', label: 'GPT-4o', description: 'Optimized GPT-4' }, + ], + ollama: [ + { id: 'llama2', label: 'Llama 2', description: 'Meta\'s Llama 2 model' }, + { id: 'llama2:13b', label: 'Llama 2 13B', description: 'Larger Llama 2 variant' }, + { id: 'mistral', label: 'Mistral', description: 'Mistral AI model' }, + { id: 'codellama', label: 'Code Llama', description: 'Code-specialized Llama' }, + ], + azure: [ + { id: 'gpt-35-turbo', label: 'GPT-3.5 Turbo', description: 'Azure GPT-3.5' }, + { id: 'gpt-4', label: 'GPT-4', description: 'Azure GPT-4' }, + { id: 'gpt-4-32k', label: 'GPT-4 32K', description: 'Large context GPT-4' }, + ], + anthropic: [ + { id: 'claude-3-haiku', label: 'Claude 3 Haiku', description: 'Fast and lightweight' }, + { id: 'claude-3-sonnet', label: 'Claude 3 Sonnet', description: 'Balanced performance' }, + { id: 'claude-3-opus', label: 'Claude 3 Opus', description: 'Most capable Claude model' }, + ], + }, + // Statistics data stats: { warnings: { @@ -268,6 +343,7 @@ export const useSettingsStore = defineStore('settings', { this.loadRbacSettings(), this.loadMultitenancySettings(), this.loadRetentionSettings(), + this.loadAiSettings(), this.loadVersionInfo(), this.loadAvailableOptions(), ]) @@ -540,6 
+616,20 @@ export const useSettingsStore = defineStore('settings', { } }, + /** + * Load AI settings + */ + async loadAiSettings() { + try { + const response = await axios.get(generateUrl('/apps/openregister/api/settings/ai')) + if (response.data) { + this.aiOptions = { ...this.aiOptions, ...response.data } + } + } catch (error) { + console.error('Failed to load AI settings:', error) + } + }, + /** * Update Retention settings */ @@ -566,6 +656,58 @@ export const useSettingsStore = defineStore('settings', { } }, + /** + * Update AI settings + */ + async updateAiSettings(aiData) { + this.saving = true + try { + const response = await axios.put( + generateUrl('/apps/openregister/api/settings/ai'), + aiData + ) + + if (response.data) { + this.aiOptions = { ...this.aiOptions, ...response.data } + } + + showSuccess('AI settings updated successfully') + return response.data + } catch (error) { + console.error('Failed to update AI settings:', error) + showError('Failed to update AI settings: ' + error.message) + throw error + } finally { + this.saving = false + } + }, + + /** + * Test AI connection + */ + async testAiConnection() { + this.testingConnection = true + try { + const response = await axios.post( + generateUrl('/apps/openregister/api/settings/ai/test') + ) + + if (response.data.success) { + showSuccess('AI connection test successful') + } else { + showError('AI connection test failed: ' + response.data.message) + } + + return response.data + } catch (error) { + console.error('AI connection test failed:', error) + showError('AI connection test failed: ' + error.message) + throw error + } finally { + this.testingConnection = false + } + }, + /** * Load version information */ @@ -736,6 +878,8 @@ export const useSettingsStore = defineStore('settings', { return await this.updateMultitenancySettings(data.multitenancy) } else if (data.retention) { return await this.updateRetentionSettings(data.retention) + } else if (data.ai) { + return await this.updateAiSettings(data.ai) } else { // Fallback to legacy endpoint const response = await axios.put(generateUrl('/apps/openregister/api/settings'), data) diff --git a/src/views/settings/Settings.vue b/src/views/settings/Settings.vue index b2c28d6c7..5b2134f43 100644 --- a/src/views/settings/Settings.vue +++ b/src/views/settings/Settings.vue @@ -46,6 +46,9 @@ + + + @@ -62,6 +65,7 @@ import CacheManagement from './sections/CacheManagement.vue' import RbacConfiguration from './sections/RbacConfiguration.vue' import MultitenancyConfiguration from './sections/MultitenancyConfiguration.vue' import RetentionConfiguration from './sections/RetentionConfiguration.vue' +import AiConfiguration from './sections/AiConfiguration.vue' /** * @class Settings @@ -90,6 +94,7 @@ export default { RbacConfiguration, MultitenancyConfiguration, RetentionConfiguration, + AiConfiguration, }, computed: { diff --git a/src/views/settings/sections/AiConfiguration.vue b/src/views/settings/sections/AiConfiguration.vue new file mode 100644 index 000000000..96ec4914a --- /dev/null +++ b/src/views/settings/sections/AiConfiguration.vue @@ -0,0 +1,668 @@ + + + + + diff --git a/website/docs/Features/ai-functionality.md b/website/docs/Features/ai-functionality.md new file mode 100644 index 000000000..271246295 --- /dev/null +++ b/website/docs/Features/ai-functionality.md @@ -0,0 +1,311 @@ +# AI Functionality + +OpenRegister integrates with the [LLPhant framework](https://github.com/LLPhant/LLPhant) to provide advanced AI capabilities including automatic text generation and vector embeddings 
for semantic search and content analysis. + +## Overview + +The AI functionality enables: + +- **Automatic Text Generation**: Generate searchable text representations of objects +- **Vector Embeddings**: Create semantic embeddings for similarity search and recommendations +- **Content Analysis**: AI-powered analysis of object content +- **Semantic Search**: Enhanced search capabilities using vector similarity + +## Supported AI Providers + +OpenRegister supports multiple AI providers through the LLPhant framework: + +### OpenAI +- **Models**: GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, GPT-4o +- **Embeddings**: Ada v2, Embedding v3 Small/Large +- **Configuration**: Requires OpenAI API key + +### Ollama (Local AI) +- **Models**: Llama 2, Mistral, Code Llama +- **Embeddings**: Nomic Embed Text, MxBai Embed Large, All MiniLM +- **Configuration**: Requires local Ollama installation + +### Azure OpenAI +- **Models**: GPT-3.5 Turbo, GPT-4, GPT-4 32K +- **Embeddings**: Ada v2, Embedding v3 Small/Large +- **Configuration**: Requires Azure OpenAI service and endpoint + +### Anthropic +- **Models**: Claude 3 Haiku, Sonnet, Opus +- **Embeddings**: Uses OpenAI embeddings (recommended) +- **Configuration**: Requires Anthropic API key + +## Configuration + +### Basic Setup + +1. **Install Dependencies**: LLPhant is automatically included via Composer +2. **Enable AI**: Toggle AI functionality in Settings → AI Configuration +3. **Choose Provider**: Select your preferred AI provider +4. **Configure API**: Enter API keys and connection details +5. **Select Models**: Choose language and embedding models + +### Provider-Specific Configuration + +#### OpenAI Configuration +```json +{ + 'provider': 'openai', + 'apiKey': 'sk-...', + 'model': 'gpt-3.5-turbo', + 'embeddingModel': 'text-embedding-ada-002', + 'maxTokens': 4000, + 'temperature': 0.7 +} +``` + +#### Ollama Configuration +```json +{ + 'provider': 'ollama', + 'baseUrl': 'http://localhost:11434', + 'model': 'llama2', + 'embeddingModel': 'nomic-embed-text', + 'maxTokens': 4000, + 'temperature': 0.7 +} +``` + +#### Azure Configuration +```json +{ + 'provider': 'azure', + 'apiKey': 'your-azure-key', + 'baseUrl': 'https://your-resource.openai.azure.com', + 'model': 'gpt-35-turbo', + 'embeddingModel': 'text-embedding-ada-002' +} +``` + +#### Anthropic Configuration +```json +{ + 'provider': 'anthropic', + 'apiKey': 'sk-ant-...', + 'model': 'claude-3-sonnet', + 'embeddingModel': 'text-embedding-ada-002' +} +``` + +## Database Schema + +AI functionality adds two new fields to the `openregister_objects` table: + +### Text Field +- **Type**: TEXT (65KB capacity) +- **Purpose**: Stores AI-generated searchable text representation +- **Usage**: Full-text search, content analysis + +### Embedding Field +- **Type**: JSON +- **Purpose**: Stores vector embeddings as JSON arrays +- **Format**: `[0.1234, -0.5678, 0.9012, ...]` +- **Usage**: Semantic similarity search, recommendations + +## Processing Options + +### Automatic Processing +- **Auto Text Generation**: Automatically generate text for new/updated objects +- **Auto Embedding**: Automatically create embeddings for semantic search +- **Batch Processing**: Process multiple objects efficiently + +### Manual Processing +- **Batch Size**: Configure number of objects per batch (1-1000) +- **Retry Logic**: Automatic retry for failed requests (0-10 attempts) +- **Timeout**: Request timeout configuration (5-300 seconds) + +## Embedding Models and Dimensions + +Different embedding models produce vectors with different dimensions: 
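+
+Similarity comparisons only work between vectors of the same length, so embeddings produced by different models should not be mixed. As a minimal, hypothetical sketch (the `cosineSimilarity()` helper and the `$objectA`/`$objectB` variables below are illustrative and not part of the OpenRegister API), comparing two stored embeddings could look like this; the per-provider model lists follow.
+
+```php
+// Illustrative helper: cosine similarity between two embeddings.
+// Both vectors must come from the same embedding model (equal dimensions).
+function cosineSimilarity(array $a, array $b): float
+{
+    if (count($a) !== count($b)) {
+        throw new InvalidArgumentException('Embeddings must have the same number of dimensions');
+    }
+
+    $dot = 0.0;
+    $normA = 0.0;
+    $normB = 0.0;
+    foreach ($a as $i => $value) {
+        $dot   += $value * $b[$i];
+        $normA += $value ** 2;
+        $normB += $b[$i] ** 2;
+    }
+
+    if ($normA === 0.0 || $normB === 0.0) {
+        return 0.0;
+    }
+
+    return $dot / (sqrt($normA) * sqrt($normB));
+}
+
+// Example: compare two objects that were embedded with the same model.
+$score = cosineSimilarity($objectA->getEmbedding(), $objectB->getEmbedding());
+```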
+ +### OpenAI Models +- **text-embedding-ada-002**: 1536 dimensions +- **text-embedding-3-small**: 1536 dimensions +- **text-embedding-3-large**: 3072 dimensions + +### Ollama Models +- **nomic-embed-text**: 768 dimensions +- **mxbai-embed-large**: 1024 dimensions +- **all-minilm**: 384 dimensions + +### Choosing Embedding Models +- **Higher dimensions**: More detailed representations, better accuracy +- **Lower dimensions**: Faster processing, less storage +- **Compatibility**: Ensure consistent dimensions for similarity comparisons + +## API Integration + +### Text Generation +Objects automatically generate text representations during save operations: + +```php +$object = new ObjectEntity(); +$object->setObject(['name' => 'Example', 'description' => 'Test object']); + +// AI service generates text representation +$aiService->generateText($object); + +// Text is stored in the 'text' field +echo $object->getText(); // 'Example: Test object with detailed description...' +``` + +### Embedding Generation +Vector embeddings are created from text representations: + +```php +// Generate embedding from text +$embedding = $aiService->generateEmbedding($object->getText()); + +// Store embedding in object +$object->setEmbedding($embedding); + +// Embedding is now available for similarity search +$similarObjects = $searchService->findSimilar($object->getEmbedding()); +``` + +## Search Integration + +### Semantic Search +Use embeddings for semantic similarity searches: + +```php +// Find objects similar to a query +$queryEmbedding = $aiService->generateEmbedding('search query'); +$results = $objectService->findBySimilarity($queryEmbedding, $threshold = 0.8); +``` + +### Enhanced Text Search +AI-generated text improves full-text search capabilities: + +```php +// Search in AI-generated text representations +$results = $objectService->searchText('complex query terms'); +``` + +## Performance Considerations + +### Batch Processing +- Process objects in configurable batches +- Avoid API rate limits with proper batching +- Monitor processing progress and errors + +### Caching +- Cache embeddings to avoid regeneration +- Use appropriate cache invalidation strategies +- Consider storage requirements for embeddings + +### Cost Management +- Monitor API usage and costs +- Use appropriate models for your use case +- Consider local models (Ollama) for cost efficiency + +## Error Handling + +### Connection Testing +Test AI provider connections before processing: + +```php +$testResult = $aiService->testConnection(); +if (!$testResult->success) { + throw new Exception('AI service unavailable: ' . 
$testResult->message); +} +``` + +### Retry Logic +Automatic retry for failed requests: +- Configurable retry attempts (0-10) +- Exponential backoff for rate limiting +- Detailed error logging for troubleshooting + +### Fallback Strategies +- Continue operation if AI services are unavailable +- Queue objects for later AI processing +- Maintain search functionality without embeddings + +## Security Considerations + +### API Key Management +- Store API keys securely in environment variables +- Use separate keys for development/production +- Rotate keys regularly + +### Data Privacy +- AI providers may process your data +- Review provider privacy policies +- Consider local models for sensitive data + +### Access Control +- Restrict AI configuration to administrators +- Audit AI processing activities +- Monitor usage and costs + +## Monitoring and Logging + +### Processing Logs +- Detailed logs for AI operations +- Error tracking and alerting +- Performance metrics collection + +### Usage Analytics +- Track AI service usage +- Monitor costs and quotas +- Analyze processing performance + +## Troubleshooting + +### Common Issues + +#### Connection Failures +- Verify API keys and endpoints +- Check network connectivity +- Review firewall settings + +#### Model Availability +- Ensure selected models are available +- Check provider service status +- Verify model permissions + +#### Performance Issues +- Adjust batch sizes +- Optimize timeout settings +- Monitor API rate limits + +### Debug Mode +Enable detailed logging for troubleshooting: + +```json +{ + 'enableLogging': true, + 'logLevel': 'debug' +} +``` + +## Migration + +When enabling AI functionality on existing installations: + +1. **Run Migration**: Database schema updates automatically +2. **Configure Provider**: Set up AI provider and models +3. **Bulk Processing**: Process existing objects for embeddings +4. **Verify Results**: Test search and AI functionality + +## Future Enhancements + +Planned AI features include: +- Content classification and tagging +- Automated metadata extraction +- Multi-language support +- Custom model fine-tuning +- Advanced analytics and insights + +## Support + +For AI functionality support: +- Check LLPhant documentation: https://github.com/LLPhant/LLPhant +- Review provider-specific documentation +- Contact support for configuration assistance diff --git a/website/docs/Features/index.md b/website/docs/Features/index.md index f92905c5d..49d1b95c2 100644 --- a/website/docs/Features/index.md +++ b/website/docs/Features/index.md @@ -18,7 +18,7 @@ One of the most powerful aspects of Open Register is how its core concepts inter ## Relationship Overview -The core entities in Open Register - Registers, Schemas, Objects, Files, Sources, and Events - form an interconnected system: +The core entities in Open Register - Registers, Schemas, Objects, Files, Sources, Events, and AI functionality - form an interconnected system: ```mermaid architecture-beta @@ -416,6 +416,61 @@ The dashboard sidebar now interacts with the dashboard store in a reactive way. This approach improves maintainability and ensures a single source of truth for the active register and schema throughout the application. 
+## AI-Object Relationship + +OpenRegister integrates AI functionality to enhance objects with automatic text generation and vector embeddings: + +### How AI Enhances Objects + +- Objects can have AI-generated text representations stored in the 'text' field +- Objects can have vector embeddings stored in the 'embedding' field for semantic search +- AI processing is configurable and can be automatic or manual +- Multiple AI providers are supported through the LLPhant framework + +### Example + +```json +// Object with AI enhancements +{ + 'id': 'document-12345', + 'register': 'document-register', + 'schema': 'document', + 'object': { + 'title': 'Annual Report', + 'content': 'This report contains...' + }, + 'text': 'Annual Report: This comprehensive document contains financial data, performance metrics, and strategic insights for the fiscal year...', + 'embedding': [0.1234, -0.5678, 0.9012, 0.3456, ...], // 1536-dimensional vector + // other properties... +} +``` + +### AI Processing Flow + +```mermaid +graph TD + ObjectSave[Object Save] -->|triggers| AiProcessor[AI Processor] + AiProcessor -->|generates| TextRepresentation[Text Representation] + AiProcessor -->|creates| VectorEmbedding[Vector Embedding] + TextRepresentation -->|stored in| TextField[text field] + VectorEmbedding -->|stored in| EmbeddingField[embedding field] + EmbeddingField -->|enables| SemanticSearch[Semantic Search] + TextField -->|improves| FullTextSearch[Full-Text Search] +``` + +### Design Considerations + +When designing AI integration: + +1. **Provider Selection**: Choose appropriate AI provider based on requirements, cost, and privacy +2. **Model Configuration**: Select suitable language and embedding models for your use case +3. **Processing Strategy**: Decide between automatic and manual AI processing +4. **Performance**: Consider batch processing for large datasets +5. **Cost Management**: Monitor API usage and implement appropriate limits +6. **Data Privacy**: Ensure compliance with privacy requirements when using external AI services + +For detailed AI configuration and usage, see [AI Functionality](ai-functionality.md). + ## Conclusion The relationships between Open Register's core concepts create a flexible yet structured system for managing data. By understanding these relationships, you can design effective data models that leverage the full power of the system while maintaining data quality and performance. \ No newline at end of file From f0bea1156349393ab5ab3527881dc2c32f41de09 Mon Sep 17 00:00:00 2001 From: Ruben van der Linde Date: Thu, 18 Sep 2025 12:40:24 +0200 Subject: [PATCH 2/2] feat: add comprehensive AI functionality to OpenRegister - Add AiService for AI operations using LLPhant framework - Add ChatController for AI chat interactions and text/embedding generation - Add AI settings endpoints (GET/PUT /api/settings/ai, POST /api/settings/ai/test) - Add chat API endpoints (/api/chat, /api/chat/capabilities, etc.) 
- Integrate AI enrichment into object creation/update workflow - Add text and embedding fields to ObjectEntity - Update documentation with comprehensive AI functionality guide - Support multiple AI providers: OpenAI, Ollama, Azure, Anthropic - Auto-generate text representations and vector embeddings for objects - Add AI configuration UI section in settings - Implement dependency injection for AiService in Application.php --- appinfo/routes.php | 17 +- lib/AppInfo/Application.php | 17 +- lib/Controller/ChatController.php | 408 ++++++++++++++++++ lib/Controller/SettingsController.php | 55 +++ lib/Service/AiService.php | 484 ++++++++++++++++++++++ lib/Service/ObjectHandlers/SaveObject.php | 48 +++ lib/Service/SettingsService.php | 190 +++++++++ website/docs/Features/ai-functionality.md | 147 ++++++- 8 files changed, 1362 insertions(+), 4 deletions(-) create mode 100644 lib/Controller/ChatController.php create mode 100644 lib/Service/AiService.php diff --git a/appinfo/routes.php b/appinfo/routes.php index ca0635857..a00ff66ff 100644 --- a/appinfo/routes.php +++ b/appinfo/routes.php @@ -39,6 +39,11 @@ ['name' => 'settings#getVersionInfo', 'url' => '/api/settings/version', 'verb' => 'GET'], + // AI settings endpoints + ['name' => 'settings#getAiSettings', 'url' => '/api/settings/ai', 'verb' => 'GET'], + ['name' => 'settings#updateAiSettings', 'url' => '/api/settings/ai', 'verb' => 'PUT'], + ['name' => 'settings#testAiConnection', 'url' => '/api/settings/ai/test', 'verb' => 'POST'], + // Statistics endpoint ['name' => 'settings#getStatistics', 'url' => '/api/settings/statistics', 'verb' => 'GET'], @@ -165,7 +170,15 @@ ['name' => 'organisation#setActive', 'url' => '/api/organisations/{uuid}/set-active', 'verb' => 'POST'], ['name' => 'organisation#join', 'url' => '/api/organisations/{uuid}/join', 'verb' => 'POST'], ['name' => 'organisation#leave', 'url' => '/api/organisations/{uuid}/leave', 'verb' => 'POST'], - // Tags - ['name' => 'tags#getAllTags', 'url' => 'api/tags', 'verb' => 'GET'], + // Tags + ['name' => 'tags#getAllTags', 'url' => 'api/tags', 'verb' => 'GET'], + + // AI Chat endpoints + ['name' => 'chat#chat', 'url' => '/api/chat', 'verb' => 'POST'], + ['name' => 'chat#getCapabilities', 'url' => '/api/chat/capabilities', 'verb' => 'GET'], + ['name' => 'chat#testConnection', 'url' => '/api/chat/test', 'verb' => 'POST'], + ['name' => 'chat#getContext', 'url' => '/api/chat/context', 'verb' => 'GET'], + ['name' => 'chat#generateText', 'url' => '/api/chat/generate-text', 'verb' => 'POST'], + ['name' => 'chat#generateEmbedding', 'url' => '/api/chat/generate-embedding', 'verb' => 'POST'], ], ]; diff --git a/lib/AppInfo/Application.php b/lib/AppInfo/Application.php index 1d5b48442..e598a6002 100644 --- a/lib/AppInfo/Application.php +++ b/lib/AppInfo/Application.php @@ -48,6 +48,7 @@ use OCA\OpenRegister\Service\SolrService; use OCA\OpenRegister\Service\GuzzleSolrService; use OCA\OpenRegister\Service\SettingsService; +use OCA\OpenRegister\Service\AiService; use OCA\OpenRegister\Service\SolrSchemaService; use OCA\OpenRegister\Setup\SolrSetup; use OCA\OpenRegister\Service\SchemaCacheService; @@ -231,7 +232,20 @@ function ($container) { ); - // Register SaveObject with consolidated cache services + // Register AiService for AI functionality + $context->registerService( + AiService::class, + function ($container) { + return new AiService( + $container->get('OCP\IAppConfig'), + $container->get('OCP\IConfig'), + $container->get('Psr\Log\LoggerInterface'), + $container->get(SettingsService::class) + ); + } 
+ ); + + // Register SaveObject with consolidated cache services and AI service $context->registerService( SaveObject::class, function ($container) { @@ -247,6 +261,7 @@ function ($container) { $container->get(ObjectCacheService::class), $container->get(SchemaCacheService::class), $container->get(SchemaFacetCacheService::class), + $container->get(AiService::class), $container->get('Psr\Log\LoggerInterface'), new \Twig\Loader\ArrayLoader([]) ); diff --git a/lib/Controller/ChatController.php b/lib/Controller/ChatController.php new file mode 100644 index 000000000..49334e39b --- /dev/null +++ b/lib/Controller/ChatController.php @@ -0,0 +1,408 @@ + + * @copyright 2024 Conduction B.V. + * @license EUPL-1.2 https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12 + * + * @version GIT: + * + * @link https://www.OpenRegister.nl + */ + +namespace OCA\OpenRegister\Controller; + +use OCP\AppFramework\Controller; +use OCP\AppFramework\Http\JSONResponse; +use OCP\IRequest; +use Psr\Container\ContainerInterface; +use OCA\OpenRegister\Service\AiService; +use OCA\OpenRegister\Service\SettingsService; +use Psr\Log\LoggerInterface; + +/** + * Controller for handling AI chat interactions in OpenRegister. + * + * Provides endpoints for chat conversations, context-aware AI responses, + * and AI-powered assistance for data management tasks. + */ +class ChatController extends Controller +{ + /** + * ChatController constructor. + * + * @param string $appName The name of the app + * @param IRequest $request The request object + * @param ContainerInterface $container The container for dependency injection + * @param AiService $aiService The AI service for chat operations + * @param SettingsService $settingsService The settings service for configuration + * @param LoggerInterface $logger Logger interface for logging operations + */ + public function __construct( + $appName, + IRequest $request, + private readonly ContainerInterface $container, + private readonly AiService $aiService, + private readonly SettingsService $settingsService, + private readonly LoggerInterface $logger + ) { + parent::__construct($appName, $request); + }//end __construct() + + /** + * Handle chat message and return AI response + * + * Processes a user message and returns an AI-generated response with optional + * context information and conversation history for enhanced accuracy. + * + * @return JSONResponse JSON response containing AI chat response + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function chat(): JSONResponse + { + try { + $data = $this->request->getParams(); + + // Validate required parameters + if (empty($data['message'])) { + return new JSONResponse([ + 'error' => 'Message is required' + ], 400); + } + + $message = $data['message']; + $context = $data['context'] ?? []; + $history = $data['history'] ?? []; + $provider = $data['provider'] ?? null; + + // Check if AI is enabled + if (!$this->aiService->isAiEnabled()) { + return new JSONResponse([ + 'error' => 'AI functionality is not enabled. Please configure AI settings first.' 
+ ], 503); + } + + // Process chat message + $response = $this->aiService->chat($message, $context, $history, $provider); + + $this->logger->info('Chat message processed successfully', [ + 'message_length' => strlen($message), + 'response_length' => strlen($response['message']), + 'provider' => $response['provider'] + ]); + + return new JSONResponse([ + 'success' => true, + 'response' => $response['message'], + 'metadata' => [ + 'provider' => $response['provider'], + 'model' => $response['model'], + 'timestamp' => $response['timestamp'], + 'context_used' => $response['context_used'], + 'history_length' => $response['history_length'] + ] + ]); + + } catch (\Exception $e) { + $this->logger->error('Chat processing failed', [ + 'error' => $e->getMessage(), + 'trace' => $e->getTraceAsString() + ]); + + return new JSONResponse([ + 'error' => 'Failed to process chat message: ' . $e->getMessage() + ], 500); + } + }//end chat() + + /** + * Get AI chat capabilities and status + * + * Returns information about available AI providers, models, and current + * configuration status for the chat functionality. + * + * @return JSONResponse JSON response containing chat capabilities information + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function getCapabilities(): JSONResponse + { + try { + $aiSettings = $this->settingsService->getAiSettingsOnly(); + $isEnabled = $this->aiService->isAiEnabled(); + + $capabilities = [ + 'enabled' => $isEnabled, + 'provider' => $aiSettings['provider'], + 'model' => $aiSettings['model'], + 'embedding_model' => $aiSettings['embeddingModel'], + 'max_tokens' => $aiSettings['maxTokens'], + 'temperature' => $aiSettings['temperature'], + 'auto_enrich_objects' => $aiSettings['autoEnrichObjects'], + 'supported_providers' => [ + 'openai' => [ + 'name' => 'OpenAI', + 'models' => ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo', 'gpt-4o'], + 'embedding_models' => ['text-embedding-ada-002', 'text-embedding-3-small', 'text-embedding-3-large'] + ], + 'ollama' => [ + 'name' => 'Ollama (Local)', + 'models' => ['llama2', 'llama3', 'mistral', 'codellama'], + 'embedding_models' => ['nomic-embed-text', 'all-minilm'] + ] + ], + 'features' => [ + 'chat' => $isEnabled, + 'embeddings' => $isEnabled, + 'object_enrichment' => $isEnabled && $aiSettings['autoEnrichObjects'], + 'context_aware' => true, + 'conversation_history' => true + ] + ]; + + return new JSONResponse($capabilities); + + } catch (\Exception $e) { + $this->logger->error('Failed to get chat capabilities', [ + 'error' => $e->getMessage() + ]); + + return new JSONResponse([ + 'error' => 'Failed to get chat capabilities: ' . $e->getMessage() + ], 500); + } + }//end getCapabilities() + + /** + * Test AI chat connection + * + * Performs a connection test to verify that the AI chat functionality + * is working correctly with the current configuration. + * + * @return JSONResponse JSON response containing connection test results + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function testConnection(): JSONResponse + { + try { + $data = $this->request->getParams(); + $provider = $data['provider'] ?? null; + + $result = $this->aiService->testConnection($provider); + + return new JSONResponse($result); + + } catch (\Exception $e) { + $this->logger->error('AI connection test failed', [ + 'error' => $e->getMessage() + ]); + + return new JSONResponse([ + 'success' => false, + 'message' => 'Connection test failed: ' . 
$e->getMessage(), + 'details' => ['exception' => $e->getMessage()] + ], 500); + } + }//end testConnection() + + /** + * Get context information for AI chat + * + * Retrieves relevant context information that can be used to enhance + * AI chat responses, including available registers, schemas, and objects. + * + * @return JSONResponse JSON response containing context information + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function getContext(): JSONResponse + { + try { + $context = []; + + // Try to get available registers + try { + if ($this->container->has('OCA\OpenRegister\Db\RegisterMapper')) { + $registerMapper = $this->container->get('OCA\OpenRegister\Db\RegisterMapper'); + $registers = $registerMapper->findAll(limit: 20); + $context['registers'] = array_map(function($register) { + return [ + 'id' => $register->getId(), + 'name' => $register->getName(), + 'description' => $register->getDescription() + ]; + }, $registers); + } + } catch (\Exception $e) { + $this->logger->debug('Could not load registers for context', ['error' => $e->getMessage()]); + } + + // Try to get available schemas + try { + if ($this->container->has('OCA\OpenRegister\Db\SchemaMapper')) { + $schemaMapper = $this->container->get('OCA\OpenRegister\Db\SchemaMapper'); + $schemas = $schemaMapper->findAll(limit: 20); + $context['schemas'] = array_map(function($schema) { + return [ + 'id' => $schema->getId(), + 'name' => $schema->getName(), + 'description' => $schema->getDescription(), + 'version' => $schema->getVersion() + ]; + }, $schemas); + } + } catch (\Exception $e) { + $this->logger->debug('Could not load schemas for context', ['error' => $e->getMessage()]); + } + + // Try to get object statistics + try { + $stats = $this->settingsService->getStats(); + $context['statistics'] = [ + 'total_objects' => $stats['totals']['totalObjects'] ?? 0, + 'total_registers' => $stats['totals']['totalRegisters'] ?? 0, + 'total_schemas' => $stats['totals']['totalSchemas'] ?? 0 + ]; + } catch (\Exception $e) { + $this->logger->debug('Could not load statistics for context', ['error' => $e->getMessage()]); + } + + // Add AI configuration info + $aiSettings = $this->settingsService->getAiSettingsOnly(); + $context['ai_config'] = [ + 'enabled' => $aiSettings['enabled'], + 'provider' => $aiSettings['provider'], + 'auto_enrich_objects' => $aiSettings['autoEnrichObjects'] + ]; + + return new JSONResponse([ + 'success' => true, + 'context' => $context, + 'generated_at' => date('c') + ]); + + } catch (\Exception $e) { + $this->logger->error('Failed to get chat context', [ + 'error' => $e->getMessage() + ]); + + return new JSONResponse([ + 'error' => 'Failed to get context information: ' . $e->getMessage() + ], 500); + } + }//end getContext() + + /** + * Generate text representation for an object + * + * Creates an AI-generated text representation for a given object, + * useful for search indexing and content analysis. 
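+     *
+     * Example request body (illustrative values): {"object": {"name": "Annual Report", "summary": "Yearly financial figures"}}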
+ * + * @return JSONResponse JSON response containing generated text representation + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function generateText(): JSONResponse + { + try { + $data = $this->request->getParams(); + + if (empty($data['object'])) { + return new JSONResponse([ + 'error' => 'Object data is required' + ], 400); + } + + if (!$this->aiService->isAiEnabled()) { + return new JSONResponse([ + 'error' => 'AI functionality is not enabled' + ], 503); + } + + $objectData = $data['object']; + $textRepresentation = $this->aiService->generateTextRepresentation($objectData); + + return new JSONResponse([ + 'success' => true, + 'text' => $textRepresentation, + 'length' => strlen($textRepresentation), + 'generated_at' => date('c') + ]); + + } catch (\Exception $e) { + $this->logger->error('Failed to generate text representation', [ + 'error' => $e->getMessage() + ]); + + return new JSONResponse([ + 'error' => 'Failed to generate text representation: ' . $e->getMessage() + ], 500); + } + }//end generateText() + + /** + * Generate vector embedding for text + * + * Creates a vector embedding for the provided text that can be used + * for semantic search and similarity matching. + * + * @return JSONResponse JSON response containing generated vector embedding + * + * @NoAdminRequired + * @NoCSRFRequired + */ + public function generateEmbedding(): JSONResponse + { + try { + $data = $this->request->getParams(); + + if (empty($data['text'])) { + return new JSONResponse([ + 'error' => 'Text is required' + ], 400); + } + + if (!$this->aiService->isAiEnabled()) { + return new JSONResponse([ + 'error' => 'AI functionality is not enabled' + ], 503); + } + + $text = $data['text']; + $provider = $data['provider'] ?? null; + $embedding = $this->aiService->generateEmbedding($text, $provider); + + return new JSONResponse([ + 'success' => true, + 'embedding' => $embedding, + 'dimensions' => count($embedding), + 'text_length' => strlen($text), + 'generated_at' => date('c') + ]); + + } catch (\Exception $e) { + $this->logger->error('Failed to generate embedding', [ + 'error' => $e->getMessage() + ]); + + return new JSONResponse([ + 'error' => 'Failed to generate embedding: ' . 
$e->getMessage() + ], 500); + } + }//end generateEmbedding() + +}//end class diff --git a/lib/Controller/SettingsController.php b/lib/Controller/SettingsController.php index a576317be..3dac984fe 100644 --- a/lib/Controller/SettingsController.php +++ b/lib/Controller/SettingsController.php @@ -909,6 +909,61 @@ public function getVersionInfo(): JSONResponse } } + /** + * Get AI settings only + * + * @NoAdminRequired + * @NoCSRFRequired + * + * @return JSONResponse AI configuration + */ + public function getAiSettings(): JSONResponse + { + try { + $data = $this->settingsService->getAiSettingsOnly(); + return new JSONResponse($data); + } catch (\Exception $e) { + return new JSONResponse(['error' => $e->getMessage()], 500); + } + } + + /** + * Update AI settings only + * + * @NoAdminRequired + * @NoCSRFRequired + * + * @return JSONResponse Updated AI configuration + */ + public function updateAiSettings(): JSONResponse + { + try { + $data = $this->request->getParams(); + $result = $this->settingsService->updateAiSettingsOnly($data); + return new JSONResponse($result); + } catch (\Exception $e) { + return new JSONResponse(['error' => $e->getMessage()], 500); + } + } + + /** + * Test AI connection with current settings + * + * @NoAdminRequired + * @NoCSRFRequired + * + * @return JSONResponse AI connection test results + */ + public function testAiConnection(): JSONResponse + { + try { + $result = $this->settingsService->testAiConnection(); + return new JSONResponse($result); + } catch (\Exception $e) { + return new JSONResponse(['error' => $e->getMessage()], 500); + } + } + /** * Test schema-aware SOLR mapping by indexing sample objects * diff --git a/lib/Service/AiService.php b/lib/Service/AiService.php new file mode 100644 index 000000000..635a634d3 --- /dev/null +++ b/lib/Service/AiService.php @@ -0,0 +1,484 @@ + + * @copyright 2024 Conduction B.V. + * @license EUPL-1.2 https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12 + * + * @version GIT: + * + * @link https://www.OpenRegister.nl + */ + +namespace OCA\OpenRegister\Service; + +use OCP\IAppConfig; +use OCP\IConfig; +use Psr\Log\LoggerInterface; +use LLPhant\Chat\OpenAIChat; +use LLPhant\Embeddings\OpenAIEmbeddings; +use LLPhant\OpenAIConfig; +use LLPhant\Chat\OllamaChat; +use LLPhant\Embeddings\OllamaEmbeddings; +use LLPhant\OllamaConfig; +use LLPhant\Chat\Message; +use LLPhant\Chat\SystemMessage; +use LLPhant\Chat\UserMessage; + +/** + * Service for handling AI-related operations. + * + * Provides functionality for text generation, embeddings, chat interactions, + * and automatic content enrichment using various AI providers through LLPhant. + */ +class AiService +{ + /** + * The name of the application + * + * @var string $appName The name of the app + */ + private string $appName; + + /** + * AiService constructor. 
+ * + * @param IAppConfig $config App configuration interface + * @param IConfig $systemConfig System configuration interface + * @param LoggerInterface $logger Logger interface for logging operations + * @param SettingsService $settingsService Settings service for configuration + */ + public function __construct( + private readonly IAppConfig $config, + private readonly IConfig $systemConfig, + private readonly LoggerInterface $logger, + private readonly SettingsService $settingsService + ) { + $this->appName = 'openregister'; + }//end __construct() + + /** + * Check if AI functionality is enabled and properly configured + * + * @return bool True if AI is enabled and configured, false otherwise + */ + public function isAiEnabled(): bool + { + try { + $aiSettings = $this->settingsService->getAiSettingsOnly(); + return $aiSettings['enabled'] && !empty($aiSettings['apiKey']) || $aiSettings['provider'] === 'ollama'; + } catch (\Exception $e) { + $this->logger->error('Failed to check AI configuration', ['error' => $e->getMessage()]); + return false; + } + }//end isAiEnabled() + + /** + * Get AI configuration for the specified provider + * + * @param string|null $provider AI provider (openai, ollama, etc.) + * @return array AI configuration + * @throws \RuntimeException If AI configuration is invalid + */ + private function getAiConfig(?string $provider = null): array + { + $aiSettings = $this->settingsService->getAiSettingsOnly(); + + if (!$aiSettings['enabled']) { + throw new \RuntimeException('AI functionality is disabled'); + } + + $provider = $provider ?? $aiSettings['provider']; + + switch ($provider) { + case 'openai': + if (empty($aiSettings['apiKey'])) { + throw new \RuntimeException('OpenAI API key is required'); + } + return [ + 'provider' => 'openai', + 'apiKey' => $aiSettings['apiKey'], + 'model' => $aiSettings['model'], + 'embeddingModel' => $aiSettings['embeddingModel'], + 'maxTokens' => $aiSettings['maxTokens'], + 'temperature' => $aiSettings['temperature'], + 'timeout' => $aiSettings['timeout'], + ]; + + case 'ollama': + return [ + 'provider' => 'ollama', + 'host' => $aiSettings['host'] ?: 'localhost', + 'port' => $aiSettings['port'] ?: 11434, + 'model' => $aiSettings['model'], + 'embeddingModel' => $aiSettings['embeddingModel'], + 'maxTokens' => $aiSettings['maxTokens'], + 'temperature' => $aiSettings['temperature'], + 'timeout' => $aiSettings['timeout'], + ]; + + default: + throw new \RuntimeException('Unsupported AI provider: ' . $provider); + } + }//end getAiConfig() + + /** + * Create chat client for the configured AI provider + * + * @param string|null $provider AI provider to use + * @return OpenAIChat|OllamaChat Chat client instance + * @throws \RuntimeException If chat client creation fails + */ + private function createChatClient(?string $provider = null) + { + $config = $this->getAiConfig($provider); + + switch ($config['provider']) { + case 'openai': + $openAIConfig = new OpenAIConfig(); + $openAIConfig->apiKey = $config['apiKey']; + $openAIConfig->model = $config['model']; + return new OpenAIChat($openAIConfig); + + case 'ollama': + $ollamaConfig = new OllamaConfig(); + $ollamaConfig->url = 'http://' . $config['host'] . ':' . $config['port']; + $ollamaConfig->model = $config['model']; + return new OllamaChat($ollamaConfig); + + default: + throw new \RuntimeException('Unsupported provider for chat: ' . 
$config['provider']); + } + }//end createChatClient() + + /** + * Create embeddings client for the configured AI provider + * + * @param string|null $provider AI provider to use + * @return OpenAIEmbeddings|OllamaEmbeddings Embeddings client instance + * @throws \RuntimeException If embeddings client creation fails + */ + private function createEmbeddingsClient(?string $provider = null) + { + $config = $this->getAiConfig($provider); + + switch ($config['provider']) { + case 'openai': + $openAIConfig = new OpenAIConfig(); + $openAIConfig->apiKey = $config['apiKey']; + $openAIConfig->model = $config['embeddingModel']; + return new OpenAIEmbeddings($openAIConfig); + + case 'ollama': + $ollamaConfig = new OllamaConfig(); + $ollamaConfig->url = 'http://' . $config['host'] . ':' . $config['port']; + $ollamaConfig->model = $config['embeddingModel']; + return new OllamaEmbeddings($ollamaConfig); + + default: + throw new \RuntimeException('Unsupported provider for embeddings: ' . $config['provider']); + } + }//end createEmbeddingsClient() + + /** + * Generate text representation for an object + * + * Creates a searchable text representation from object data, focusing on + * name, summary, and description fields for optimal search functionality. + * + * @param array $objectData Object data array + * @return string Generated text representation + */ + public function generateTextRepresentation(array $objectData): string + { + $textParts = []; + + // Extract key fields for text representation + if (!empty($objectData['name'])) { + $textParts[] = 'Name: ' . $objectData['name']; + } + + if (!empty($objectData['summary'])) { + $textParts[] = 'Summary: ' . $objectData['summary']; + } + + if (!empty($objectData['description'])) { + $textParts[] = 'Description: ' . $objectData['description']; + } + + // Add other relevant fields + if (!empty($objectData['tags']) && is_array($objectData['tags'])) { + $textParts[] = 'Tags: ' . implode(', ', $objectData['tags']); + } + + if (!empty($objectData['category'])) { + $textParts[] = 'Category: ' . $objectData['category']; + } + + // Combine all parts + $textRepresentation = implode('. ', $textParts); + + // Ensure we have meaningful content + if (empty(trim($textRepresentation))) { + $textRepresentation = 'Object ID: ' . ($objectData['id'] ?? 'unknown'); + } + + $this->logger->debug('Generated text representation for object', [ + 'object_id' => $objectData['id'] ?? 'unknown', + 'text_length' => strlen($textRepresentation) + ]); + + return $textRepresentation; + }//end generateTextRepresentation() + + /** + * Generate vector embedding for text content + * + * Creates a vector embedding that can be used for semantic search, + * similarity matching, and AI-powered content recommendations. + * + * @param string $text Text content to embed + * @param string|null $provider AI provider to use for embeddings + * @return array Vector embedding as array of floats + * @throws \RuntimeException If embedding generation fails + */ + public function generateEmbedding(string $text, ?string $provider = null): array + { + if (!$this->isAiEnabled()) { + throw new \RuntimeException('AI functionality is not enabled'); + } + + try { + $embeddingsClient = $this->createEmbeddingsClient($provider); + $embedding = $embeddingsClient->embedText($text); + + $this->logger->debug('Generated embedding for text', [ + 'text_length' => strlen($text), + 'embedding_dimensions' => count($embedding), + 'provider' => $provider ?? 
'default' + ]); + + return $embedding; + } catch (\Exception $e) { + $this->logger->error('Failed to generate embedding', [ + 'error' => $e->getMessage(), + 'text_length' => strlen($text), + 'provider' => $provider ?? 'default' + ]); + throw new \RuntimeException('Failed to generate embedding: ' . $e->getMessage()); + } + }//end generateEmbedding() + + /** + * Enrich object with AI-generated content + * + * Automatically generates text representation and embeddings for an object + * if AI auto-enrichment is enabled in settings. + * + * @param array $objectData Object data to enrich + * @return array Enriched object data with AI-generated content + */ + public function enrichObject(array $objectData): array + { + if (!$this->isAiEnabled()) { + return $objectData; + } + + $aiSettings = $this->settingsService->getAiSettingsOnly(); + if (!$aiSettings['autoEnrichObjects']) { + return $objectData; + } + + try { + // Generate text representation + $textRepresentation = $this->generateTextRepresentation($objectData); + $objectData['text'] = $textRepresentation; + + // Generate embedding for the text + $embedding = $this->generateEmbedding($textRepresentation); + $objectData['embedding'] = $embedding; + + $this->logger->info('Object enriched with AI content', [ + 'object_id' => $objectData['id'] ?? 'unknown', + 'text_length' => strlen($textRepresentation), + 'embedding_dimensions' => count($embedding) + ]); + + } catch (\Exception $e) { + $this->logger->error('Failed to enrich object with AI content', [ + 'error' => $e->getMessage(), + 'object_id' => $objectData['id'] ?? 'unknown' + ]); + // Don't fail the entire operation if AI enrichment fails + } + + return $objectData; + }//end enrichObject() + + /** + * Handle chat conversation with AI + * + * Processes a chat message and returns an AI-generated response, + * with optional context and conversation history. + * + * @param string $message User message + * @param array $context Optional context information + * @param array $history Optional conversation history + * @param string|null $provider AI provider to use + * @return array Chat response with message and metadata + * @throws \RuntimeException If chat processing fails + */ + public function chat(string $message, array $context = [], array $history = [], ?string $provider = null): array + { + if (!$this->isAiEnabled()) { + throw new \RuntimeException('AI functionality is not enabled'); + } + + try { + $chatClient = $this->createChatClient($provider); + $config = $this->getAiConfig($provider); + + // Prepare messages for the conversation + $messages = []; + + // Add system message with context + $systemPrompt = $this->buildSystemPrompt($context); + if (!empty($systemPrompt)) { + $messages[] = new SystemMessage($systemPrompt); + } + + // Add conversation history + foreach ($history as $historyItem) { + if ($historyItem['role'] === 'user') { + $messages[] = new UserMessage($historyItem['content']); + } elseif ($historyItem['role'] === 'assistant') { + $messages[] = new Message($historyItem['content'], 'assistant'); + } + } + + // Add current user message + $messages[] = new UserMessage($message); + + // Generate response + $response = $chatClient->generateText($messages); + + $this->logger->info('AI chat response generated', [ + 'message_length' => strlen($message), + 'response_length' => strlen($response), + 'provider' => $provider ?? 
'default', + 'context_provided' => !empty($context) + ]); + + return [ + 'message' => $response, + 'provider' => $config['provider'], + 'model' => $config['model'], + 'timestamp' => date('c'), + 'context_used' => !empty($context), + 'history_length' => count($history) + ]; + + } catch (\Exception $e) { + $this->logger->error('Failed to process chat message', [ + 'error' => $e->getMessage(), + 'message_length' => strlen($message), + 'provider' => $provider ?? 'default' + ]); + throw new \RuntimeException('Failed to process chat message: ' . $e->getMessage()); + } + }//end chat() + + /** + * Build system prompt with context information + * + * @param array $context Context information for the AI + * @return string System prompt text + */ + private function buildSystemPrompt(array $context): string + { + $prompt = "You are an AI assistant for OpenRegister, a Nextcloud application for managing object-oriented data stores. "; + $prompt .= "You help users understand and work with their data objects, schemas, and registers."; + + if (!empty($context['register'])) { + $prompt .= "\n\nCurrent register: " . $context['register']; + } + + if (!empty($context['schema'])) { + $prompt .= "\nCurrent schema: " . $context['schema']; + } + + if (!empty($context['objects'])) { + $prompt .= "\nAvailable objects: " . implode(', ', array_slice($context['objects'], 0, 10)); + if (count($context['objects']) > 10) { + $prompt .= " and " . (count($context['objects']) - 10) . " more"; + } + } + + $prompt .= "\n\nPlease provide helpful, accurate responses about OpenRegister functionality and data management."; + + return $prompt; + }//end buildSystemPrompt() + + /** + * Test AI connection and functionality + * + * @param string|null $provider AI provider to test + * @return array Test results with success status and details + */ + public function testConnection(?string $provider = null): array + { + try { + if (!$this->isAiEnabled()) { + return [ + 'success' => false, + 'message' => 'AI functionality is not enabled', + 'details' => [] + ]; + } + + $config = $this->getAiConfig($provider); + + // Test chat functionality + $chatClient = $this->createChatClient($provider); + $testMessage = new UserMessage("Hello, this is a connection test. Please respond with 'Connection successful'."); + $chatResponse = $chatClient->generateText([$testMessage]); + + // Test embeddings functionality + $embeddingsClient = $this->createEmbeddingsClient($provider); + $testEmbedding = $embeddingsClient->embedText("This is a test text for embedding generation."); + + return [ + 'success' => true, + 'message' => 'AI connection test successful', + 'details' => [ + 'provider' => $config['provider'], + 'chat_model' => $config['model'], + 'embedding_model' => $config['embeddingModel'], + 'chat_response_length' => strlen($chatResponse), + 'embedding_dimensions' => count($testEmbedding), + 'test_timestamp' => date('c') + ] + ]; + + } catch (\Exception $e) { + return [ + 'success' => false, + 'message' => 'AI connection test failed: ' . $e->getMessage(), + 'details' => [ + 'error' => $e->getMessage(), + 'provider' => $provider ?? 
'default', + 'test_timestamp' => date('c') + ] + ]; + } + }//end testConnection() + +}//end class diff --git a/lib/Service/ObjectHandlers/SaveObject.php b/lib/Service/ObjectHandlers/SaveObject.php index 2f1dddafd..1bbe5a215 100644 --- a/lib/Service/ObjectHandlers/SaveObject.php +++ b/lib/Service/ObjectHandlers/SaveObject.php @@ -39,6 +39,7 @@ use OCA\OpenRegister\Service\ObjectCacheService; use OCA\OpenRegister\Service\SchemaCacheService; use OCA\OpenRegister\Service\SchemaFacetCacheService; +use OCA\OpenRegister\Service\AiService; use OCA\OpenRegister\Db\AuditTrailMapper; use OCP\IURLGenerator; use OCP\IUserSession; @@ -121,6 +122,7 @@ class SaveObject * @param ObjectCacheService $objectCacheService Object cache service for entity and query caching. * @param SchemaCacheService $schemaCacheService Schema cache service for schema entity caching. * @param SchemaFacetCacheService $schemaFacetCacheService Schema facet cache service for facet caching. + * @param AiService $aiService AI service for AI-powered content enrichment. * @param LoggerInterface $logger Logger interface for logging operations. * @param ArrayLoader $arrayLoader Twig array loader for template rendering. */ @@ -136,6 +138,7 @@ public function __construct( private readonly ObjectCacheService $objectCacheService, private readonly SchemaCacheService $schemaCacheService, private readonly SchemaFacetCacheService $schemaFacetCacheService, + private readonly AiService $aiService, private readonly LoggerInterface $logger, ArrayLoader $arrayLoader, ) { @@ -546,6 +549,51 @@ public function hydrateObjectMetadata(ObjectEntity $entity, Schema $schema): voi } } + // AI Enrichment: Generate text representation and embedding if AI is enabled + try { + if ($this->aiService->isAiEnabled()) { + // Prepare object data for AI enrichment + $enrichmentData = [ + 'id' => $entity->getUuid(), + 'name' => $entity->getName(), + 'description' => $entity->getDescription(), + 'summary' => $entity->getSummary(), + ]; + + // Generate text representation from name, summary, and description + $textRepresentation = $this->aiService->generateTextRepresentation($enrichmentData); + if (!empty($textRepresentation)) { + $entity->setText($textRepresentation); + + // Generate embedding for the text representation + try { + $embedding = $this->aiService->generateEmbedding($textRepresentation); + if (!empty($embedding)) { + $entity->setEmbedding($embedding); + } + } catch (\Exception $e) { + $this->logger->warning('Failed to generate embedding for object', [ + 'object_id' => $entity->getUuid(), + 'error' => $e->getMessage() + ]); + // Continue without embedding if generation fails + } + } + + $this->logger->debug('AI enrichment completed for object', [ + 'object_id' => $entity->getUuid(), + 'text_length' => strlen($textRepresentation ?? 
''), + 'has_embedding' => !empty($entity->getEmbedding()) + ]); + } + } catch (\Exception $e) { + $this->logger->warning('AI enrichment failed for object', [ + 'object_id' => $entity->getUuid(), + 'error' => $e->getMessage() + ]); + // Continue without AI enrichment if it fails + } + }//end hydrateObjectMetadata() diff --git a/lib/Service/SettingsService.php b/lib/Service/SettingsService.php index 90ef559b5..2f696a457 100644 --- a/lib/Service/SettingsService.php +++ b/lib/Service/SettingsService.php @@ -327,6 +327,41 @@ public function getSettings(): array ]; }//end if + // AI Settings with defaults + $aiConfig = $this->config->getValueString($this->appName, 'ai', ''); + if (empty($aiConfig)) { + $data['ai'] = [ + 'enabled' => false, + 'provider' => 'openai', + 'host' => '', + 'port' => 443, + 'apiKey' => '', + 'model' => 'gpt-3.5-turbo', + 'embeddingModel' => 'text-embedding-ada-002', + 'embeddingDimensions' => 1536, + 'maxTokens' => 150, + 'temperature' => 0.7, + 'timeout' => 30, + 'autoEnrichObjects' => true, + ]; + } else { + $aiData = json_decode($aiConfig, true); + $data['ai'] = [ + 'enabled' => $aiData['enabled'] ?? false, + 'provider' => $aiData['provider'] ?? 'openai', + 'host' => $aiData['host'] ?? '', + 'port' => $aiData['port'] ?? 443, + 'apiKey' => $aiData['apiKey'] ?? '', + 'model' => $aiData['model'] ?? 'gpt-3.5-turbo', + 'embeddingModel' => $aiData['embeddingModel'] ?? 'text-embedding-ada-002', + 'embeddingDimensions' => $aiData['embeddingDimensions'] ?? 1536, + 'maxTokens' => $aiData['maxTokens'] ?? 150, + 'temperature' => $aiData['temperature'] ?? 0.7, + 'timeout' => $aiData['timeout'] ?? 30, + 'autoEnrichObjects' => $aiData['autoEnrichObjects'] ?? true, + ]; + }//end if + return $data; } catch (\Exception $e) { throw new \RuntimeException('Failed to retrieve settings: '.$e->getMessage()); @@ -481,6 +516,26 @@ public function updateSettings(array $data): array $this->config->setValueString($this->appName, 'solr', json_encode($solrConfig)); } + // Handle AI settings + if (isset($data['ai'])) { + $aiData = $data['ai']; + $aiConfig = [ + 'enabled' => $aiData['enabled'] ?? false, + 'provider' => $aiData['provider'] ?? 'openai', + 'host' => $aiData['host'] ?? '', + 'port' => (int) ($aiData['port'] ?? 443), + 'apiKey' => $aiData['apiKey'] ?? '', + 'model' => $aiData['model'] ?? 'gpt-3.5-turbo', + 'embeddingModel' => $aiData['embeddingModel'] ?? 'text-embedding-ada-002', + 'embeddingDimensions' => (int) ($aiData['embeddingDimensions'] ?? 1536), + 'maxTokens' => (int) ($aiData['maxTokens'] ?? 150), + 'temperature' => (float) ($aiData['temperature'] ?? 0.7), + 'timeout' => (int) ($aiData['timeout'] ?? 30), + 'autoEnrichObjects' => $aiData['autoEnrichObjects'] ?? 
true, + ]; + $this->config->setValueString($this->appName, 'ai', json_encode($aiConfig)); + } + // Return the updated settings return $this->getSettings(); } catch (\Exception $e) { @@ -2564,5 +2619,140 @@ public function getVersionInfoOnly(): array } } + /** + * Get AI settings only + * + * @return array AI configuration + * @throws \RuntimeException If AI settings retrieval fails + */ + public function getAiSettingsOnly(): array + { + try { + $aiConfig = $this->config->getValueString($this->appName, 'ai', ''); + + if (empty($aiConfig)) { + return [ + 'enabled' => false, + 'provider' => 'openai', + 'host' => '', + 'port' => 443, + 'apiKey' => '', + 'model' => 'gpt-3.5-turbo', + 'embeddingModel' => 'text-embedding-ada-002', + 'embeddingDimensions' => 1536, + 'maxTokens' => 150, + 'temperature' => 0.7, + 'timeout' => 30, + 'autoEnrichObjects' => true, + ]; + } + + $aiData = json_decode($aiConfig, true); + return [ + 'enabled' => $aiData['enabled'] ?? false, + 'provider' => $aiData['provider'] ?? 'openai', + 'host' => $aiData['host'] ?? '', + 'port' => $aiData['port'] ?? 443, + 'apiKey' => $aiData['apiKey'] ?? '', + 'model' => $aiData['model'] ?? 'gpt-3.5-turbo', + 'embeddingModel' => $aiData['embeddingModel'] ?? 'text-embedding-ada-002', + 'embeddingDimensions' => $aiData['embeddingDimensions'] ?? 1536, + 'maxTokens' => $aiData['maxTokens'] ?? 150, + 'temperature' => $aiData['temperature'] ?? 0.7, + 'timeout' => $aiData['timeout'] ?? 30, + 'autoEnrichObjects' => $aiData['autoEnrichObjects'] ?? true, + ]; + } catch (\Exception $e) { + throw new \RuntimeException('Failed to retrieve AI settings: '.$e->getMessage()); + } + } + + /** + * Update AI settings only + * + * @param array $aiData AI configuration data + * @return array Updated AI configuration + * @throws \RuntimeException If AI settings update fails + */ + public function updateAiSettingsOnly(array $aiData): array + { + try { + $aiConfig = [ + 'enabled' => $aiData['enabled'] ?? false, + 'provider' => $aiData['provider'] ?? 'openai', + 'host' => $aiData['host'] ?? '', + 'port' => (int) ($aiData['port'] ?? 443), + 'apiKey' => $aiData['apiKey'] ?? '', + 'model' => $aiData['model'] ?? 'gpt-3.5-turbo', + 'embeddingModel' => $aiData['embeddingModel'] ?? 'text-embedding-ada-002', + 'embeddingDimensions' => (int) ($aiData['embeddingDimensions'] ?? 1536), + 'maxTokens' => (int) ($aiData['maxTokens'] ?? 150), + 'temperature' => (float) ($aiData['temperature'] ?? 0.7), + 'timeout' => (int) ($aiData['timeout'] ?? 30), + 'autoEnrichObjects' => $aiData['autoEnrichObjects'] ?? 
true, + ]; + + $this->config->setValueString($this->appName, 'ai', json_encode($aiConfig)); + return $aiConfig; + } catch (\Exception $e) { + throw new \RuntimeException('Failed to update AI settings: '.$e->getMessage()); + } + } + + /** + * Test AI connection with current settings + * + * @return array Connection test results with status and details + */ + public function testAiConnection(): array + { + try { + $aiSettings = $this->getAiSettingsOnly(); + + if (!$aiSettings['enabled']) { + return [ + 'success' => false, + 'message' => 'AI is disabled in settings', + 'details' => [] + ]; + } + + // For now, return a simple validation of settings + // TODO: Implement actual AI connection testing when AiService is created + $requiredFields = []; + if (empty($aiSettings['apiKey']) && $aiSettings['provider'] !== 'ollama') { + $requiredFields[] = 'apiKey'; + } + if (empty($aiSettings['model'])) { + $requiredFields[] = 'model'; + } + + if (!empty($requiredFields)) { + return [ + 'success' => false, + 'message' => 'Missing required AI configuration fields: ' . implode(', ', $requiredFields), + 'details' => ['missing_fields' => $requiredFields] + ]; + } + + return [ + 'success' => true, + 'message' => 'AI configuration appears valid', + 'details' => [ + 'provider' => $aiSettings['provider'], + 'model' => $aiSettings['model'], + 'embedding_model' => $aiSettings['embeddingModel'], + 'auto_enrich' => $aiSettings['autoEnrichObjects'] + ] + ]; + } catch (\Exception $e) { + return [ + 'success' => false, + 'message' => 'AI connection test failed: ' . $e->getMessage(), + 'details' => ['exception' => $e->getMessage()] + ]; + } + } + }//end class diff --git a/website/docs/Features/ai-functionality.md b/website/docs/Features/ai-functionality.md index 271246295..5ed0bc045 100644 --- a/website/docs/Features/ai-functionality.md +++ b/website/docs/Features/ai-functionality.md @@ -1,6 +1,6 @@ # AI Functionality -OpenRegister integrates with the [LLPhant framework](https://github.com/LLPhant/LLPhant) to provide advanced AI capabilities including automatic text generation and vector embeddings for semantic search and content analysis. +OpenRegister integrates with the [LLPhant framework](https://github.com/LLPhant/LLPhant) to provide advanced AI capabilities including automatic text generation, vector embeddings, chat interactions, and intelligent content enrichment for semantic search and analysis. ## Overview @@ -8,8 +8,11 @@ The AI functionality enables: - **Automatic Text Generation**: Generate searchable text representations of objects - **Vector Embeddings**: Create semantic embeddings for similarity search and recommendations +- **AI Chat Interface**: Interactive chat functionality with context-aware responses +- **Object Enrichment**: Automatic AI-powered content enhancement during object creation/updates - **Content Analysis**: AI-powered analysis of object content - **Semantic Search**: Enhanced search capabilities using vector similarity +- **Multi-Provider Support**: Support for OpenAI, Ollama, Azure, and Anthropic ## Supported AI Providers @@ -294,6 +297,144 @@ When enabling AI functionality on existing installations: 3. **Bulk Processing**: Process existing objects for embeddings 4. 
**Verify Results**: Test search and AI functionality +## API Endpoints + +### Settings Endpoints + +- `GET /api/settings/ai` - Retrieve current AI configuration +- `PUT /api/settings/ai` - Update AI configuration +- `POST /api/settings/ai/test` - Test AI connection and functionality + +### Chat Endpoints + +- `POST /api/chat` - Send a chat message and receive AI response +- `GET /api/chat/capabilities` - Get AI chat capabilities and status +- `POST /api/chat/test` - Test AI chat connection +- `GET /api/chat/context` - Get context information for AI chat +- `POST /api/chat/generate-text` - Generate text representation for an object +- `POST /api/chat/generate-embedding` - Generate vector embedding for text + +## Advanced Usage + +### AI Chat Interface + +Use the chat API to interact with AI: + +```javascript +// Send a chat message with context +const response = await fetch('/api/chat', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + message: 'How do I create a new register?', + context: { + register: 'current-register', + schema: 'current-schema' + }, + history: [ + { role: 'user', content: 'Previous question...' }, + { role: 'assistant', content: 'Previous response...' } + ] + }) +}); + +const chatResponse = await response.json(); +console.log(chatResponse.response); // AI-generated response +``` + +### Manual Text Generation + +Generate text representations manually: + +```javascript +// Generate text for an object +const textResponse = await fetch('/api/chat/generate-text', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + object: { + name: 'Product Name', + description: 'Product description...', + summary: 'Brief summary...' + } + }) +}); + +const { text } = await textResponse.json(); +``` + +### Manual Embedding Generation + +Generate vector embeddings for custom text: + +```javascript +// Generate embedding for text +const embeddingResponse = await fetch('/api/chat/generate-embedding', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + text: 'Text to embed...', + provider: 'openai' // optional + }) +}); + +const { embedding, dimensions } = await embeddingResponse.json(); +``` + +### Object Enrichment Process + +The AI enrichment process automatically occurs during object creation and updates: + +```mermaid +flowchart TD + A[Object Creation/Update] --> B{AI Enabled?} + B -->|No| C[Save Object Normally] + B -->|Yes| D[Extract Name, Description, Summary] + D --> E[Generate Text Representation] + E --> F[Generate Vector Embedding] + F --> G[Save Object with AI Data] + G --> H[Object Saved Successfully] +``` + +## Integration with Business Logic + +### Object Metadata Hydration + +AI enrichment is integrated into the object metadata hydration process in `SaveObject.php`: + +1. **Metadata Extraction**: Name, description, and summary are extracted from object data +2. **Text Generation**: AI generates a searchable text representation +3. **Embedding Creation**: Vector embedding is created from the text +4. 
**Database Storage**: Both text and embedding are stored with the object + +### Performance Considerations + +- **Asynchronous Processing**: AI operations are designed to be non-blocking +- **Error Handling**: Failed AI operations don't prevent object creation +- **Caching**: Consider implementing caching for frequently used embeddings +- **Rate Limiting**: Be aware of API rate limits for external providers + +## Troubleshooting + +### Common Issues + +1. **AI Not Working**: Check if AI is enabled in settings and API keys are valid +2. **Connection Errors**: Test connection using the test endpoints +3. **Missing Embeddings**: Verify embedding model is supported by provider +4. **Performance Issues**: Consider using local Ollama for better performance + +### Debugging + +Enable debug logging to see AI operations: + +```php +$this->logger->debug('AI enrichment completed', [ + 'object_id' => $entity->getUuid(), + 'text_length' => strlen($textRepresentation), + 'has_embedding' => !empty($entity->getEmbedding()) +]); +``` + ## Future Enhancements Planned AI features include: @@ -302,6 +443,9 @@ Planned AI features include: - Multi-language support - Custom model fine-tuning - Advanced analytics and insights +- Batch Processing: Bulk AI enrichment for existing objects +- Custom Models: Support for fine-tuned models +- Semantic Search: Enhanced search using vector embeddings ## Support @@ -309,3 +453,4 @@ For AI functionality support: - Check LLPhant documentation: https://github.com/LLPhant/LLPhant - Review provider-specific documentation - Contact support for configuration assistance +- Test AI functionality using the provided endpoints
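+
+As a quick check for that last point, the connection-test endpoint listed under "Settings Endpoints" can be called directly. The sketch below is a minimal example, assuming the controller returns the `success`/`message`/`details` structure produced by `SettingsService::testAiConnection()` as-is:
+
+```javascript
+// Validate the stored AI configuration via the documented test endpoint
+const testResponse = await fetch('/api/settings/ai/test', {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' }
+});
+
+const testResult = await testResponse.json();
+
+if (testResult.success) {
+    console.log('AI configuration looks valid:', testResult.details);
+} else {
+    // e.g. a missing apiKey or model reported by the settings validation
+    console.warn('AI configuration issue:', testResult.message);
+}
+```
+
+Including the returned `message` and `details` when requesting support makes configuration problems easier to diagnose.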