From 32fa3bbe28943f29906bbcd85333cfc7464e0e59 Mon Sep 17 00:00:00 2001
From: Claudio Fuentes
Date: Thu, 31 Jul 2025 15:21:38 -0400
Subject: [PATCH 1/5] refactor: unify S3 client configuration across app and
 portal

- Simplified the S3 client initialization by consolidating the credential
  handling into a single configuration object, ensuring explicit credentials
  are always provided.
- Enhanced code readability and maintainability by removing conditional
  logic for environment-specific credential handling.
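For reviewers, the resulting client construction in both files reduces to
the shape below (a minimal sketch with the env reads inlined for brevity;
the actual diff keeps them in local constants):

    import { S3Client } from '@aws-sdk/client-s3';

    // Explicit credentials are now passed on every platform (Vercel, ECS,
    // local dev) instead of falling back to the SDK's default provider chain.
    export const s3Client = new S3Client({
      region: process.env.APP_AWS_REGION!,
      credentials: {
        accessKeyId: process.env.APP_AWS_ACCESS_KEY_ID!,
        secretAccessKey: process.env.APP_AWS_SECRET_ACCESS_KEY!,
      },
    });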
---
 apps/app/src/app/s3.ts      | 24 +++++++-----------------
 apps/portal/src/utils/s3.ts | 24 +++++++-----------------
 2 files changed, 14 insertions(+), 34 deletions(-)

diff --git a/apps/app/src/app/s3.ts b/apps/app/src/app/s3.ts
index 86b045a56..10571ae57 100644
--- a/apps/app/src/app/s3.ts
+++ b/apps/app/src/app/s3.ts
@@ -21,23 +21,13 @@ if (!APP_AWS_ACCESS_KEY_ID || !APP_AWS_SECRET_ACCESS_KEY || !BUCKET_NAME || !APP

 // Create a single S3 client instance
 // Add null checks or assertions if the checks above don't guarantee non-null values

-export const s3Client = new S3Client(
-  // If we are on Vercel, we MUST provide explicit credentials.
-  process.env.VERCEL === '1'
-    ? {
-        region: APP_AWS_REGION!,
-        credentials: {
-          accessKeyId: APP_AWS_ACCESS_KEY_ID!,
-          secretAccessKey: APP_AWS_SECRET_ACCESS_KEY!,
-        },
-      }
-    : // For any other environment (like AWS ECS or local dev),
-      // we only need to provide the region. The AWS SDK will
-      // automatically find the credentials from the environment.
-      {
-        region: APP_AWS_REGION!,
-      },
-);
+export const s3Client = new S3Client({
+  region: APP_AWS_REGION!,
+  credentials: {
+    accessKeyId: APP_AWS_ACCESS_KEY_ID!,
+    secretAccessKey: APP_AWS_SECRET_ACCESS_KEY!,
+  },
+});

 // Ensure BUCKET_NAME is exported and non-null checked if needed elsewhere explicitly
 if (!BUCKET_NAME && process.env.NODE_ENV === 'production') {

diff --git a/apps/portal/src/utils/s3.ts b/apps/portal/src/utils/s3.ts
index e4c4a0d4e..827f7f297 100644
--- a/apps/portal/src/utils/s3.ts
+++ b/apps/portal/src/utils/s3.ts
@@ -21,23 +21,13 @@ if (!APP_AWS_ACCESS_KEY_ID || !APP_AWS_SECRET_ACCESS_KEY || !BUCKET_NAME || !APP

 // Create a single S3 client instance
 // Add null checks or assertions if the checks above don't guarantee non-null values

-export const s3Client = new S3Client(
-  // If we are on Vercel, we MUST provide explicit credentials.
-  process.env.VERCEL === '1'
-    ? {
-        region: APP_AWS_REGION!,
-        credentials: {
-          accessKeyId: APP_AWS_ACCESS_KEY_ID!,
-          secretAccessKey: APP_AWS_SECRET_ACCESS_KEY!,
-        },
-      }
-    : // For any other environment (like AWS ECS or local dev),
-      // we only need to provide the region. The AWS SDK will
-      // automatically find the credentials from the environment.
-      {
-        region: APP_AWS_REGION!,
-      },
-);
+export const s3Client = new S3Client({
+  region: APP_AWS_REGION!,
+  credentials: {
+    accessKeyId: APP_AWS_ACCESS_KEY_ID!,
+    secretAccessKey: APP_AWS_SECRET_ACCESS_KEY!,
+  },
+});

 // Ensure BUCKET_NAME is exported and non-null checked if needed elsewhere explicitly
 if (!BUCKET_NAME && process.env.NODE_ENV === 'production') {

From 43d1232d2afd84cfa65ef25302628f737aab0ff5 Mon Sep 17 00:00:00 2001
From: Claudio Fuentes
Date: Thu, 31 Jul 2025 15:39:57 -0400
Subject: [PATCH 2/5] refactor: improve S3 client initialization and error
 handling

- Enhanced the S3 client setup by consolidating credential checks and
  providing detailed error logging for missing or invalid environment
  variables.
- Introduced a redaction function to mask sensitive information in error
  logs, improving security and clarity during initialization failures.
- Ensured the application fails fast when the S3 client configuration is
  invalid, preventing further execution without a valid client.
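The redaction keeps only the first four characters of a secret. For
example (a hypothetical value using AWS's documented example key, not a
real credential):

    redact('AKIAIOSFODNN7EXAMPLE'); // => 'AKIA****'
    redact(undefined);              // => 'undefined'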
---
 apps/app/src/app/s3.ts | 57 +++++++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 23 deletions(-)

diff --git a/apps/app/src/app/s3.ts b/apps/app/src/app/s3.ts
index 10571ae57..749c29401 100644
--- a/apps/app/src/app/s3.ts
+++ b/apps/app/src/app/s3.ts
@@ -6,34 +6,45 @@ const APP_AWS_SECRET_ACCESS_KEY = process.env.APP_AWS_SECRET_ACCESS_KEY;

 export const BUCKET_NAME = process.env.APP_AWS_BUCKET_NAME;

-if (!APP_AWS_ACCESS_KEY_ID || !APP_AWS_SECRET_ACCESS_KEY || !BUCKET_NAME || !APP_AWS_REGION) {
-  // Log the error in production environments
-  if (process.env.NODE_ENV === 'production') {
-    console.error('AWS S3 credentials or configuration missing in environment variables.');
-  } else {
-    // Throw in development for immediate feedback
-    throw new Error('AWS S3 credentials or configuration missing. Check environment variables.');
-  }
-  // Optionally, you could export a dummy/error client or null here
-  // depending on how you want consuming code to handle the missing config.
-}
+let s3ClientInstance: S3Client;

-// Create a single S3 client instance
-// Add null checks or assertions if the checks above don't guarantee non-null values
+const redact = (value?: string) => {
+  if (!value) return 'undefined';
+  return `${value.substring(0, 4)}****`;
+};

-export const s3Client = new S3Client({
-  region: APP_AWS_REGION!,
-  credentials: {
-    accessKeyId: APP_AWS_ACCESS_KEY_ID!,
-    secretAccessKey: APP_AWS_SECRET_ACCESS_KEY!,
-  },
-});
+try {
+  if (!APP_AWS_ACCESS_KEY_ID || !APP_AWS_SECRET_ACCESS_KEY || !BUCKET_NAME || !APP_AWS_REGION) {
+    throw new Error('AWS S3 credentials or configuration missing. Check environment variables.');
+  }

-// Ensure BUCKET_NAME is exported and non-null checked if needed elsewhere explicitly
-if (!BUCKET_NAME && process.env.NODE_ENV === 'production') {
-  console.error('AWS_BUCKET_NAME is not defined.');
+  s3ClientInstance = new S3Client({
+    region: APP_AWS_REGION,
+    credentials: {
+      accessKeyId: APP_AWS_ACCESS_KEY_ID,
+      secretAccessKey: APP_AWS_SECRET_ACCESS_KEY,
+    },
+  });
+} catch (error) {
+  console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
+  console.error('!!! FAILED TO INITIALIZE S3 CLIENT !!!');
+  console.error('!!! This is likely due to missing or invalid environment variables. !!!');
+  console.error('--- Provided Configuration ---');
+  console.error(`APP_AWS_REGION: ${APP_AWS_REGION}`);
+  console.error(`APP_AWS_ACCESS_KEY_ID: ${redact(APP_AWS_ACCESS_KEY_ID)}`);
+  console.error(`APP_AWS_SECRET_ACCESS_KEY: ${redact(APP_AWS_SECRET_ACCESS_KEY)}`);
+  console.error(`APP_AWS_BUCKET_NAME: ${BUCKET_NAME}`);
+  console.error('-----------------------------');
+  console.error('Error:', error);
+  console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
+  // Prevent the app from continuing without a valid S3 client
+  // In a real-world scenario, you might have a fallback or a dummy client.
+  // For now, we re-throw to make the crash explicit.
+  throw error;
 }

+export const s3Client = s3ClientInstance;
+
 /**
  * Validates if a hostname is a valid AWS S3 endpoint
  */

From f9a47358cd3a2802600ba7619bbad5868300abbc Mon Sep 17 00:00:00 2001
From: Claudio Fuentes
Date: Thu, 31 Jul 2025 15:56:43 -0400
Subject: [PATCH 3/5] refactor: enhance file upload handling and error
 management

- Refactored the uploadFile action to improve session management and error
  handling, ensuring proper authorization checks.
- Updated the TaskBody and CommentItem components to use the new uploadFile
  function, improving maintainability and user feedback during file uploads.
- Implemented better state management for file uploads, including loading
  indicators and clearer error messages for failed uploads.
- Increased the maximum file size for uploads from 5MB to 10MB, allowing
  larger files to be processed.
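uploadFile is now a plain async server function that validates its own
input and returns a discriminated result instead of going through the
safe-action client. A minimal caller sketch (the component wiring around
it is illustrative; the input fields and result shape match the diff
below):

    const result = await uploadFile({
      fileName: file.name,
      fileType: file.type,
      fileData: base64Data,
      entityId: taskId,
      entityType: AttachmentEntityType.task,
    });

    if (result.success) {
      toast.success('File uploaded successfully.');
    } else {
      toast.error(result.error);
    }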
---
 apps/app/src/actions/files/upload-file.ts     | 124 +++++++------
 .../tasks/[taskId]/components/TaskBody.tsx    | 172 ++++++++++++------
 .../src/components/comments/CommentItem.tsx   | 114 ++++++------
 3 files changed, 242 insertions(+), 168 deletions(-)

diff --git a/apps/app/src/actions/files/upload-file.ts b/apps/app/src/actions/files/upload-file.ts
index 185d81928..f3a0ecbe1 100644
--- a/apps/app/src/actions/files/upload-file.ts
+++ b/apps/app/src/actions/files/upload-file.ts
@@ -1,6 +1,7 @@
 'use server';

 import { BUCKET_NAME, s3Client } from '@/app/s3';
+import { auth } from '@/utils/auth';
 import { logger } from '@/utils/logger';

 // This log will run as soon as the module is loaded.
@@ -10,8 +11,8 @@ import { GetObjectCommand, PutObjectCommand } from '@aws-sdk/client-s3';
 import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
 import { AttachmentEntityType, AttachmentType, db } from '@db';
 import { revalidatePath } from 'next/cache';
+import { headers } from 'next/headers';
 import { z } from 'zod';
-import { authActionClient } from '../safe-action';

 function mapFileTypeToAttachmentType(fileType: string): AttachmentType {
   const type = fileType.split('/')[0];
@@ -38,19 +39,18 @@ const uploadAttachmentSchema = z.object({
   pathToRevalidate: z.string().optional(),
 });

-export const uploadFile = authActionClient
-  .inputSchema(uploadAttachmentSchema)
-  .metadata({
-    name: 'uploadFile',
-    track: {
-      event: 'File Uploaded',
-      channel: 'server',
-    },
-  })
-  .action(async ({ parsedInput, ctx }) => {
-    const { fileName, fileType, fileData, entityId, entityType, pathToRevalidate } = parsedInput;
-    const { session } = ctx;
-    const organizationId = session.activeOrganizationId;
+export const uploadFile = async (input: z.infer<typeof uploadAttachmentSchema>) => {
+  logger.info(`[uploadFile] Starting upload for ${input.fileName}`);
+  try {
+    const { fileName, fileType, fileData, entityId, entityType, pathToRevalidate } =
+      uploadAttachmentSchema.parse(input);
+
+    const session = await auth.api.getSession({ headers: await headers() });
+    const organizationId = session?.session.activeOrganizationId;
+
+    if (!organizationId) {
+      throw new Error('Not authorized - no organization found');
+    }

     logger.info(`[uploadFile] Starting upload for ${fileName} in org ${organizationId}`);

@@ -66,51 +66,55 @@
     const sanitizedFileName = fileName.replace(/[^a-zA-Z0-9.-]/g, '_');
     const key = `${organizationId}/attachments/${entityType}/${entityId}/${timestamp}-${sanitizedFileName}`;

-    try {
-      logger.info(`[uploadFile] Uploading to S3 with key: ${key}`);
-      const putCommand = new PutObjectCommand({
-        Bucket: BUCKET_NAME,
-        Key: key,
-        Body: fileBuffer,
-        ContentType: fileType,
-      });
-      await s3Client.send(putCommand);
-      logger.info(`[uploadFile] S3 upload successful for key: ${key}`);
-
-      logger.info(`[uploadFile] Creating attachment record in DB for key: ${key}`);
-      const attachment = await db.attachment.create({
-        data: {
-          name: fileName,
-          url: key,
-          type: mapFileTypeToAttachmentType(fileType),
-          entityId: entityId,
-          entityType: entityType,
-          organizationId: organizationId,
-        },
-      });
-      logger.info(`[uploadFile] DB record created with id: ${attachment.id}`);
-
-      logger.info(`[uploadFile] Generating signed URL for key: ${key}`);
-      const getCommand = new GetObjectCommand({
-        Bucket: BUCKET_NAME,
-        Key: key,
-      });
-      const signedUrl = await getSignedUrl(s3Client, getCommand, {
-        expiresIn: 900,
-      });
-      logger.info(`[uploadFile] Signed URL generated for key: ${key}`);
-
-      if (pathToRevalidate) {
-        revalidatePath(pathToRevalidate);
-      }
-
-      return {
+    logger.info(`[uploadFile] Uploading to S3 with key: ${key}`);
+    const putCommand = new PutObjectCommand({
+      Bucket: BUCKET_NAME,
+      Key: key,
+      Body: fileBuffer,
+      ContentType: fileType,
+    });
+    await s3Client.send(putCommand);
+    logger.info(`[uploadFile] S3 upload successful for key: ${key}`);
+
+    logger.info(`[uploadFile] Creating attachment record in DB for key: ${key}`);
+    const attachment = await db.attachment.create({
+      data: {
+        name: fileName,
+        url: key,
+        type: mapFileTypeToAttachmentType(fileType),
+        entityId: entityId,
+        entityType: entityType,
+        organizationId: organizationId,
+      },
+    });
+    logger.info(`[uploadFile] DB record created with id: ${attachment.id}`);
+
+    logger.info(`[uploadFile] Generating signed URL for key: ${key}`);
+    const getCommand = new GetObjectCommand({
+      Bucket: BUCKET_NAME,
+      Key: key,
+    });
+    const signedUrl = await getSignedUrl(s3Client, getCommand, {
+      expiresIn: 900,
+    });
+    logger.info(`[uploadFile] Signed URL generated for key: ${key}`);
+
+    if (pathToRevalidate) {
+      revalidatePath(pathToRevalidate);
+    }
+
+    return {
+      success: true,
+      data: {
         ...attachment,
         signedUrl,
-      };
-    } catch (error) {
-      logger.error(`[uploadFile] Error during upload process for key ${key}:`, error);
-      // Re-throw the error to be handled by the safe action client
-      throw error;
-    }
-  });
+      },
+    } as const;
+  } catch (error) {
+    logger.error(`[uploadFile] Error during upload process:`, error);
+    return {
+      success: false,
+      error: error instanceof Error ? error.message : 'An unknown error occurred.',
+    } as const;
+  }
+};
diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskBody.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskBody.tsx
index 436b4797c..d10bed492 100644
--- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskBody.tsx
+++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskBody.tsx
@@ -7,7 +7,6 @@ import { Textarea } from '@comp/ui/textarea';
 import type { Attachment } from '@db';
 import { AttachmentEntityType } from '@db';
 import { Loader2, Paperclip, Plus } from 'lucide-react';
-import { useAction } from 'next-safe-action/hooks';
 import { useRouter } from 'next/navigation';
 import type React from 'react';
 import { useCallback, useRef, useState } from 'react';
@@ -38,60 +37,127 @@ export function TaskBody({
   onAttachmentsChange,
 }: TaskBodyProps) {
   const fileInputRef = useRef<HTMLInputElement>(null);
+  const [isUploading, setIsUploading] = useState(false);
   const [busyAttachmentId, setBusyAttachmentId] = useState<string | null>(null);
   const router = useRouter();
-  const { execute, isExecuting } = useAction(uploadFile, {
-    onSuccess: () => {
-      onAttachmentsChange?.();
-      router.refresh();
-      toast.success('File uploaded successfully');
-    },
-    onError: ({ error }) => {
-      console.error('File upload failed:', error);
-      toast.error(error.serverError || 'Failed to upload file. Check console for details.');
-    },
-    onSettled: () => {
-      if (fileInputRef.current) {
-        fileInputRef.current.value = '';
-      }
-    },
-  });
+
+  const resetState = () => {
+    setIsUploading(false);
+    if (fileInputRef.current) {
+      fileInputRef.current.value = '';
+    }
+  };

   const handleFileSelect = useCallback(
     async (event: React.ChangeEvent<HTMLInputElement>) => {
       const files = event.target.files;
       if (!files || files.length === 0) return;
+      setIsUploading(true);
+      try {
+        for (const file of Array.from(files)) {
+          const MAX_FILE_SIZE_MB = 10;
+          const MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024;
+          if (file.size > MAX_FILE_SIZE_BYTES) {
+            toast.error(`File "${file.name}" exceeds the ${MAX_FILE_SIZE_MB}MB limit.`);
+            continue;
+          }

-      for (const file of Array.from(files)) {
-        const MAX_FILE_SIZE_MB = 10;
-        const MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024;
-        if (file.size > MAX_FILE_SIZE_BYTES) {
-          toast.error(`File "${file.name}" exceeds the ${MAX_FILE_SIZE_MB}MB limit.`);
-          continue;
+          const reader = new FileReader();
+          reader.onloadend = async () => {
+            const base64Data = (reader.result as string)?.split(',')[1];
+            if (!base64Data) {
+              toast.error('Failed to read file data.');
+              resetState();
+              return;
+            }
+
+            const result = await uploadFile({
+              fileName: file.name,
+              fileType: file.type,
+              fileData: base64Data,
+              entityId: taskId,
+              entityType: AttachmentEntityType.task,
+            });
+
+            if (result.success) {
+              toast.success('File uploaded successfully.');
+              onAttachmentsChange?.();
+              router.refresh();
+            } else {
+              console.error('File upload failed:', result.error);
+              toast.error(result.error || 'Failed to upload file. Check console for details.');
+            }
+          };
+          reader.onerror = () => {
+            toast.error('Error reading file.');
+            resetState();
+          };
+          reader.readAsDataURL(file);
         }
+      } finally {
+        // This finally block might run before all file readers are done.
+        // It's better to manage the loading state inside the onloadend.
+ } + }, + [taskId, onAttachmentsChange, router], + ); + + // A better way to handle multiple file uploads + const handleFileSelectMultiple = useCallback( + async (event: React.ChangeEvent) => { + const files = event.target.files; + if (!files || files.length === 0) return; + setIsUploading(true); - const reader = new FileReader(); - reader.onloadend = async () => { - const base64Data = (reader.result as string)?.split(',')[1]; - if (!base64Data) { - toast.error('Failed to read file data.'); - return; + const uploadPromises = Array.from(files).map((file) => { + return new Promise((resolve, reject) => { + const MAX_FILE_SIZE_MB = 10; + const MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024; + if (file.size > MAX_FILE_SIZE_BYTES) { + toast.error(`File "${file.name}" exceeds the ${MAX_FILE_SIZE_MB}MB limit.`); + return resolve(null); // Resolve to skip this file } - execute({ - fileName: file.name, - fileType: file.type, - fileData: base64Data, - entityId: taskId, - entityType: AttachmentEntityType.task, - }); - }; - reader.onerror = () => { - toast.error('Error reading file.'); - }; - reader.readAsDataURL(file); - } + const reader = new FileReader(); + reader.onloadend = async () => { + try { + const base64Data = (reader.result as string)?.split(',')[1]; + if (!base64Data) { + throw new Error('Failed to read file data.'); + } + const result = await uploadFile({ + fileName: file.name, + fileType: file.type, + fileData: base64Data, + entityId: taskId, + entityType: AttachmentEntityType.task, + }); + if (result.success) { + toast.success(`File "${file.name}" uploaded successfully.`); + resolve(result); + } else { + throw new Error(result.error); + } + } catch (error) { + console.error(`Failed to upload ${file.name}:`, error); + toast.error(`Failed to upload ${file.name}.`); + resolve(null); // Resolve even if there's an error to not break Promise.all + } + }; + reader.onerror = () => { + toast.error(`Error reading file "${file.name}".`); + resolve(null); + }; + reader.readAsDataURL(file); + }); + }); + + await Promise.all(uploadPromises); + + onAttachmentsChange?.(); + router.refresh(); + resetState(); }, - [execute, taskId, onAttachmentsChange, router], + [taskId, onAttachmentsChange, router], ); const triggerFileInput = () => { @@ -136,21 +202,21 @@ export function TaskBody({ onChange={onTitleChange} className="h-auto shrink-0 border-none bg-transparent p-0 md:text-lg font-semibold tracking-tight shadow-none focus-visible:ring-0" placeholder="Task Title" - disabled={disabled || isExecuting || !!busyAttachmentId} + disabled={disabled || isUploading || !!busyAttachmentId} />