diff --git a/GUI/src/components/MessageContent/MessageContent.scss b/GUI/src/components/MessageContent/MessageContent.scss
new file mode 100644
index 0000000..7b4eea5
--- /dev/null
+++ b/GUI/src/components/MessageContent/MessageContent.scss
@@ -0,0 +1,61 @@
+.message-content-wrapper {
+  width: 100%;
+
+  .message-text {
+    margin-bottom: 12px;
+    line-height: 1.6;
+    white-space: pre-wrap;
+    word-wrap: break-word;
+  }
+
+  .message-references {
+    margin-top: 16px;
+    padding-top: 12px;
+    border-top: 1px solid rgba(0, 0, 0, 0.1);
+
+    .references-title {
+      display: block;
+      font-weight: 600;
+      margin-bottom: 8px;
+      font-size: 14px;
+    }
+
+    .references-list {
+      margin: 0;
+      padding-left: 20px;
+      list-style-type: decimal;
+
+      li {
+        margin-bottom: 6px;
+        line-height: 1.5;
+
+        &:last-child {
+          margin-bottom: 0;
+        }
+      }
+
+      .reference-link {
+        color: #0066cc;
+        text-decoration: none;
+        word-break: break-all;
+        transition: color 0.2s ease;
+
+        &:hover {
+          color: #0052a3;
+          text-decoration: underline;
+        }
+
+        &:visited {
+          color: #551a8b;
+        }
+      }
+    }
+  }
+}
+
+// Dark mode support
+.test-production-llm__message--bot {
+  .message-references {
+    border-top-color: rgba(255, 255, 255, 0.1);
+  }
+}
diff --git a/GUI/src/components/MessageContent/index.tsx b/GUI/src/components/MessageContent/index.tsx
new file mode 100644
index 0000000..63ff7f2
--- /dev/null
+++ b/GUI/src/components/MessageContent/index.tsx
@@ -0,0 +1,90 @@
+import { FC } from 'react';
+import './MessageContent.scss';
+
+interface MessageContentProps {
+  content: string;
+}
+
+const MessageContent: FC<MessageContentProps> = ({ content }) => {
+  // Parse and render message content with proper formatting
+  const renderContent = () => {
+    // Split by **References:** pattern
+    const referencesMatch = content.match(/\*\*References:\*\*([\s\S]*)/);
+
+    if (!referencesMatch) {
+      // No references, return plain content with line breaks
+      return (
+        <div className="message-content-wrapper">
+          {content.split('\n').map((line, index) => (
+            <span key={index}>
+              {line}
+              {index < content.split('\n').length - 1 && <br />}
+            </span>
+          ))}
+        </div>
+      );
+    }
+
+    // Split content into main text and references
+    const mainText = content.substring(0, referencesMatch.index);
+    const referencesText = referencesMatch[1].trim();
+
+    // Parse numbered references with URLs
+    const referenceLines = referencesText
+      .split('\n')
+      .filter(line => line.trim())
+      .map(line => {
+        // Match pattern: "1. https://url" or "1. url"
+        const match = line.match(/^(\d+)\.\s+(https?:\/\/[^\s]+)/);
+        if (match) {
+          return {
+            number: match[1],
+            url: match[2],
+          };
+        }
+        return null;
+      })
+      .filter(Boolean);
+
+    return (
+      <div className="message-content-wrapper">
+        {/* Main text */}
+        {mainText && (
+          <div className="message-text">
+            {mainText.split('\n').map((line, index) => (
+              <span key={index}>
+                {line}
+                {index < mainText.split('\n').length - 1 && <br />}
+              </span>
+            ))}
+          </div>
+        )}
+
+        {/* References section */}
+        {referenceLines.length > 0 && (
+          <div className="message-references">
+            <span className="references-title">References:</span>
+            <ol className="references-list">
+              {referenceLines.map((ref, index) => (
+                <li key={index}>
+                  <a
+                    href={ref!.url}
+                    target="_blank"
+                    rel="noopener noreferrer"
+                    className="reference-link"
+                  >
+                    {ref!.url}
+                  </a>
+                </li>
+              ))}
+            </ol>
+          </div>
+        )}
+      </div>
+    );
+  };
+
+  return <>{renderContent()}</>;
+};
+
+export default MessageContent;
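Reviewer note on the parser above: a quick sketch of what the **References:** regex extracts. The sample message is hypothetical; only numbered lines whose entry starts with http:// or https:// survive the filter, so any other reference line is dropped silently.

// Hypothetical sample input, for illustration only
const sample =
  'E-residency lets you run a company remotely.\n' +
  '**References:**\n' +
  '1. https://example.com/e-residency\n' +
  '2. printed handbook, ch. 3';

// MessageContent splits on /\*\*References:\*\*([\s\S]*)/ and renders the main
// text with <br /> line breaks, then maps the numbered lines through
// /^(\d+)\.\s+(https?:\/\/[^\s]+)/; here only reference 1 matches and is
// rendered as a link, while reference 2 returns null and is filtered out.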
diff --git a/GUI/src/hooks/useStreamingResponse.tsx b/GUI/src/hooks/useStreamingResponse.tsx
new file mode 100644
index 0000000..211d44f
--- /dev/null
+++ b/GUI/src/hooks/useStreamingResponse.tsx
@@ -0,0 +1,130 @@
+import { useState, useRef, useCallback, useEffect } from 'react';
+import axios from 'axios';
+
+interface StreamingOptions {
+  authorId: string;
+  conversationHistory: Array<{ authorRole: string; message: string; timestamp: string }>;
+  url: string;
+}
+
+interface UseStreamingResponseReturn {
+  startStreaming: (message: string, options: StreamingOptions, onToken: (token: string) => void, onComplete: () => void, onError: (error: string) => void) => Promise<void>;
+  stopStreaming: () => void;
+  isStreaming: boolean;
+}
+
+export const useStreamingResponse = (channelId: string): UseStreamingResponseReturn => {
+  const [isStreaming, setIsStreaming] = useState(false);
+  const eventSourceRef = useRef<EventSource | null>(null);
+
+  const stopStreaming = useCallback(() => {
+    if (eventSourceRef.current) {
+      console.log('[SSE] Closing connection');
+      eventSourceRef.current.close();
+      eventSourceRef.current = null;
+    }
+    setIsStreaming(false);
+  }, []);
+
+  // Cleanup on unmount
+  useEffect(() => {
+    return () => {
+      if (eventSourceRef.current) {
+        eventSourceRef.current.close();
+      }
+    };
+  }, []);
+
+  const startStreaming = useCallback(
+    async (
+      message: string,
+      options: StreamingOptions,
+      onToken: (token: string) => void,
+      onComplete: () => void,
+      onError: (error: string) => void
+    ) => {
+      console.log('[SSE] Starting streaming for channel:', channelId);
+
+      // Close any existing connection
+      stopStreaming();
+
+      try {
+        // Step 1: Open SSE connection FIRST
+        const sseUrl = `https://est-rag-rtc.rootcode.software/notifications-server/sse/stream/${channelId}`;
+        console.log('[SSE] Connecting to:', sseUrl);
+
+        const eventSource = new EventSource(sseUrl);
+        eventSourceRef.current = eventSource;
+
+        eventSource.onopen = () => {
+          console.log('[SSE] Connection opened');
+        };
+
+        eventSource.onmessage = (event) => {
+          console.log('[SSE] Message received:', event.data);
+
+          try {
+            const data = JSON.parse(event.data);
+
+            if (data.type === 'stream_start') {
+              console.log('[SSE] Stream started');
+              setIsStreaming(true);
+            } else if (data.type === 'stream_chunk' && data.content) {
+              console.log('[SSE] Token:', data.content);
+              onToken(data.content);
+            } else if (data.type === 'stream_end') {
+              console.log('[SSE] Stream ended');
+              setIsStreaming(false);
+              eventSource.close();
+              eventSourceRef.current = null;
+              onComplete();
+            } else if (data.type === 'stream_error') {
+              console.error('[SSE] Stream error:', data.error);
+              setIsStreaming(false);
+              eventSource.close();
+              eventSourceRef.current = null;
+              onError(data.error || 'Stream error occurred');
+            }
+          } catch (e) {
+            console.error('[SSE] Failed to parse message:', e);
+          }
+        };
+
+        eventSource.onerror = (err) => {
+          console.error('[SSE] Connection error:', err);
+          setIsStreaming(false);
+          eventSource.close();
+          eventSourceRef.current = null;
+          onError('Connection error');
+        };
+
+        // Step 2: Wait a moment for the SSE connection to establish, then trigger the stream
+        await new Promise(resolve => setTimeout(resolve, 500));
+
+        // Step 3: POST to trigger streaming
+        const postUrl = `https://est-rag-rtc.rootcode.software/notifications-server/channels/${channelId}/orchestrate/stream`;
+        console.log('[API] Triggering stream:', postUrl);
+
+        await axios.post(postUrl, {
+          message,
+          options,
+        });
+
+        console.log('[API] Stream triggered successfully');
+
+      } catch (err) {
+        console.error('[SSE] Error starting stream:', err);
+        stopStreaming();
+        onError(err instanceof Error ? err.message : 'Failed to start streaming');
+      }
+    },
+    [channelId, stopStreaming]
+  );
+
+  return {
+    startStreaming,
+    stopStreaming,
+    isStreaming,
+  };
+};
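Reviewer note: the payload shapes the hook's onmessage handler accepts, written out as a TypeScript sketch. This is inferred from the branches above, not an authoritative contract for the notifications-server.

// Inferred from useStreamingResponse's onmessage branches; an assumption, not a server spec.
type StreamEvent =
  | { type: 'stream_start' }
  | { type: 'stream_chunk'; content: string }
  | { type: 'stream_end' }
  | { type: 'stream_error'; error?: string };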
diff --git a/GUI/src/pages/TestProductionLLM/index.tsx b/GUI/src/pages/TestProductionLLM/index.tsx
index a9c1493..90084b4 100644
--- a/GUI/src/pages/TestProductionLLM/index.tsx
+++ b/GUI/src/pages/TestProductionLLM/index.tsx
@@ -1,8 +1,9 @@
-import { FC, useState, useRef, useEffect } from 'react';
+import { FC, useState, useRef, useEffect, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { Button, FormTextarea, Section } from 'components';
-import { productionInference, ProductionInferenceRequest } from 'services/inference';
+import { Button, FormTextarea } from 'components';
 import { useToast } from 'hooks/useToast';
+import { useStreamingResponse } from 'hooks/useStreamingResponse';
+import MessageContent from 'components/MessageContent';
 import './TestProductionLLM.scss';
 
 interface Message {
@@ -15,139 +16,115 @@ interface Message {
 const TestProductionLLM: FC = () => {
   const { t } = useTranslation();
   const toast = useToast();
-  const [message, setMessage] = useState('');
+  const [inputMessage, setInputMessage] = useState('');
   const [messages, setMessages] = useState<Message[]>([]);
   const [isLoading, setIsLoading] = useState(false);
   const messagesEndRef = useRef<HTMLDivElement>(null);
 
-  const scrollToBottom = () => {
-    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
-  };
+  // Generate a unique channel ID for this session
+  const channelId = useMemo(() => `channel-${Math.random().toString(36).substring(2, 15)}`, []);
+  const { startStreaming, stopStreaming, isStreaming } = useStreamingResponse(channelId);
 
+  // Auto-scroll to bottom
   useEffect(() => {
-    scrollToBottom();
+    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
   }, [messages]);
 
   const handleSendMessage = async () => {
-    if (!message.trim()) {
+    if (!inputMessage.trim()) {
       toast.open({
         type: 'warning',
-        title: t('warningTitle'),
-        message: t('emptyMessageWarning'),
+        title: 'Warning',
+        message: 'Please enter a message',
       });
       return;
     }
 
+    const userMessageText = inputMessage.trim();
+
+    // Add user message
     const userMessage: Message = {
       id: `user-${Date.now()}`,
-      content: message.trim(),
+      content: userMessageText,
       isUser: true,
       timestamp: new Date().toISOString(),
     };
 
-    // Add user message to chat
     setMessages(prev => [...prev, userMessage]);
-    setMessage('');
+    setInputMessage('');
     setIsLoading(true);
 
-    try {
-      // Hardcoded values as requested
-      const request: ProductionInferenceRequest = {
-        chatId: 'test-chat-001',
-        message: userMessage.content,
-        authorId: 'test-author-001',
-        conversationHistory: messages.map(msg => ({
-          authorRole: msg.isUser ? 'user' : 'bot',
-          message: msg.content,
-          timestamp: msg.timestamp,
-        })),
-        url: 'https://test-url.example.com',
-      };
-
-      let response;
-      let attemptCount = 0;
-      const maxAttempts = 2;
-
-      // Retry logic
-      while (attemptCount < maxAttempts) {
-        try {
-          attemptCount++;
-          console.log(`Production Inference Attempt ${attemptCount}/${maxAttempts}`);
-          response = await productionInference(request);
-
-          // If we get a successful response, break out of retry loop
-          if (!response.status || response.status < 400) {
-            break;
-          }
-
-          // If first attempt failed with error status, retry once more
-          if (attemptCount < maxAttempts && response.status >= 400) {
-            console.log('Retrying due to error status...');
-            continue;
-          }
-        } catch (err) {
-          // If first attempt threw an error, retry once more
-          if (attemptCount < maxAttempts) {
-            console.log('Retrying due to exception...');
-            continue;
-          }
-          throw err; // Re-throw on final attempt
-        }
-      }
+    // Create bot message ID
+    const botMessageId = `bot-${Date.now()}`;
 
-      console.log('Production Inference Response:', response);
+    // Prepare conversation history (exclude the current user message)
+    const conversationHistory = messages.map(msg => ({
+      authorRole: msg.isUser ? 'user' : 'bot',
+      message: msg.content,
+      timestamp: msg.timestamp,
+    }));
 
-      // Create bot response message
-      let botContent = '';
-      let botMessageType: 'success' | 'error' = 'success';
+    const streamingOptions = {
+      authorId: 'test-user-456',
+      conversationHistory,
+      url: 'opensearch-dashboard-test',
+    };
 
-      if (response.status && response.status >= 400) {
-        // Error response
-        botContent = response.content || 'An error occurred while processing your request.';
-        botMessageType = 'error';
-      } else {
-        // Success response
-        botContent = response?.response?.content || 'Response received successfully.';
+    // Callbacks for streaming
+    const onToken = (token: string) => {
+      console.log('[Component] Received token:', token);
+
+      // Hide the loading indicator once tokens arrive; kept outside the
+      // updater below so the updater stays a pure function
+      setIsLoading(false);
+
+      setMessages(prev => {
+        // Find the bot message
+        const botMsgIndex = prev.findIndex(msg => msg.id === botMessageId);
 
-        if (response.questionOutOfLlmScope) {
-          botContent += ' (Note: This question appears to be outside the LLM scope)';
+        if (botMsgIndex === -1) {
+          // First token - add the bot message
+          console.log('[Component] Adding bot message with first token');
+          return [
+            ...prev,
+            {
+              id: botMessageId,
+              content: token,
+              isUser: false,
+              timestamp: new Date().toISOString(),
+            }
+          ];
+        } else {
+          // Append token to existing message
+          console.log('[Component] Appending token to existing message');
+          const updated = [...prev];
+          updated[botMsgIndex] = {
+            ...updated[botMsgIndex],
+            content: updated[botMsgIndex].content + token,
+          };
+          return updated;
         }
-      }
-
-      const botMessage: Message = {
-        id: `bot-${Date.now()}`,
-        content: botContent,
-        isUser: false,
-        timestamp: new Date().toISOString(),
-      };
-
-      setMessages(prev => [...prev, botMessage]);
+      });
+    };
 
-      // Show toast notification
-      // toast.open({
-      //   type: botMessageType,
-      //   title: t('errorOccurred'),
-      //   message: t('errorMessage'),
-      // });
+    const onComplete = () => {
+      console.log('[Component] Stream completed');
+      setIsLoading(false);
+    };
 
-    } catch (error) {
-      console.error('Error sending message:', error);
+    const onError = (error: string) => {
+      console.error('[Component] Stream error:', error);
+      setIsLoading(false);
 
-      const errorMessage: Message = {
-        id: `error-${Date.now()}`,
-        content: 'Failed to send message. Please check your connection and try again.',
-        isUser: false,
-        timestamp: new Date().toISOString(),
-      };
-
-      setMessages(prev => [...prev, errorMessage]);
-
       toast.open({
         type: 'error',
-        title: 'Connection Error',
-        message: 'Unable to connect to the production LLM service.',
+        title: 'Streaming Error',
+        message: error,
       });
-    } finally {
+    };
+
+    // Start streaming
+    try {
+      await startStreaming(userMessageText, streamingOptions, onToken, onComplete, onError);
+    } catch (error) {
+      console.error('[Component] Failed to start streaming:', error);
       setIsLoading(false);
     }
   };
@@ -161,6 +138,7 @@ const TestProductionLLM: FC = () => {
 
   const clearChat = () => {
     setMessages([]);
+    stopStreaming();
     toast.open({
       type: 'info',
       title: 'Chat Cleared',
@@ -195,7 +173,8 @@ const TestProductionLLM: FC = () => {
             }`}
           >
-            {msg.content}
+            <MessageContent content={msg.content} />
+            {/* {msg.content} */}
             {new Date(msg.timestamp).toLocaleTimeString()}
@@ -222,20 +201,20 @@ const TestProductionLLM: FC = () => {
         <FormTextarea
-          value={message}
-          onChange={(e) => setMessage(e.target.value)}
+          value={inputMessage}
+          onChange={(e) => setInputMessage(e.target.value)}
           onKeyDown={handleKeyPress}
           placeholder="Type your message here... (Press Enter to send, Shift+Enter for new line)"
           hideLabel
          maxRows={4}
-          disabled={isLoading}
+          disabled={isLoading || isStreaming}
         />
@@ -244,4 +223,4 @@ const TestProductionLLM: FC = () => {
   );
 };
 
-export default TestProductionLLM;
\ No newline at end of file
+export default TestProductionLLM;
diff --git a/docker-compose.yml b/docker-compose.yml
index eae852a..f4a43b0 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -128,7 +128,7 @@ services:
       - REACT_APP_RUUTER_API_URL=http://localhost:8086
       - REACT_APP_RUUTER_PRIVATE_API_URL=http://localhost:8088
       - REACT_APP_CUSTOMER_SERVICE_LOGIN=http://localhost:3004/et/dev-auth
-      - REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:3004 http://localhost:3005 ws://localhost;
+      - REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:3004 http://localhost:3005 ws://localhost https://est-rag-rtc.rootcode.software;
       - DEBUG_ENABLED=true
       - CHOKIDAR_USEPOLLING=true
       - PORT=3001
@@ -568,7 +568,7 @@ services:
       PORT: 4040
       REFRESH_INTERVAL: 1000
       QUEUE_REFRESH_INTERVAL: 4000
-      CORS_WHITELIST_ORIGINS: http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080
+      CORS_WHITELIST_ORIGINS: http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,https://est-rag-rtc.rootcode.software
       RUUTER_URL: http://ruuter-public:8086
       CHAT_TERMINATION_DELAY: 5000
     volumes:
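Reviewer note: to verify the new connect-src / CORS entries by hand, a browser-console smoke test against the two endpoints the hook uses. A sketch only; the channel ID and payload values are arbitrary examples.

// Arbitrary example channel; both URLs are the ones hardcoded in useStreamingResponse.
const ch = 'channel-smoketest';
const es = new EventSource(`https://est-rag-rtc.rootcode.software/notifications-server/sse/stream/${ch}`);
es.onmessage = (e) => console.log('[smoke] event:', e.data);
es.onerror = (e) => console.error('[smoke] error (check CSP/CORS):', e);

// Trigger a stream on the same channel; the payload mirrors the hook's POST body.
fetch(`https://est-rag-rtc.rootcode.software/notifications-server/channels/${ch}/orchestrate/stream`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: 'ping',
    options: { authorId: 'smoke-test', conversationHistory: [], url: 'opensearch-dashboard-test' },
  }),
});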