diff --git a/apps/app/instrumentation-client.ts b/apps/app/instrumentation-client.ts index 6ea5a015f..e945c0bf3 100644 --- a/apps/app/instrumentation-client.ts +++ b/apps/app/instrumentation-client.ts @@ -3,7 +3,13 @@ import { initBotId } from 'botid/client/core'; initBotId({ protect: [ { path: '/api/chat', method: 'POST' }, - { path: '/api/tasks-automations/chat', method: 'POST' }, - { path: '/api/tasks-automations/errors', method: 'POST' }, + { + path: `${process.env.NEXT_PUBLIC_ENTERPRISE_API_URL}/api/tasks-automations/chat`, + method: 'POST', + }, + { + path: `${process.env.NEXT_PUBLIC_ENTERPRISE_API_URL}/api/tasks-automations/errors`, + method: 'POST', + }, ], }); diff --git a/apps/app/package.json b/apps/app/package.json index e746ddbf4..10b39d0fa 100644 --- a/apps/app/package.json +++ b/apps/app/package.json @@ -29,6 +29,8 @@ "@monaco-editor/react": "^4.7.0", "@nangohq/frontend": "^0.53.2", "@next/third-parties": "^15.3.1", + "@novu/api": "^1.6.0", + "@novu/nextjs": "^3.10.1", "@number-flow/react": "^0.5.9", "@prisma/client": "^6.13.0", "@prisma/instrumentation": "6.6.0", diff --git a/apps/app/src/actions/policies/accept-requested-policy-changes.ts b/apps/app/src/actions/policies/accept-requested-policy-changes.ts index 1ec426539..63d66c423 100644 --- a/apps/app/src/actions/policies/accept-requested-policy-changes.ts +++ b/apps/app/src/actions/policies/accept-requested-policy-changes.ts @@ -1,7 +1,6 @@ 'use server'; import { db, PolicyStatus } from '@db'; -import { sendPolicyNotificationEmail } from '@trycompai/email'; import { revalidatePath, revalidateTag } from 'next/cache'; import { z } from 'zod'; import { authActionClient } from '../safe-action'; @@ -72,6 +71,7 @@ export const acceptRequestedPolicyChangesAction = authActionClient approverId: null, signedBy: [], // Clear the signedBy field lastPublishedAt: new Date(), // Update last published date + reviewDate: new Date(), // Update reviewDate to current date }, }); @@ -92,56 +92,34 @@ export const acceptRequestedPolicyChangesAction = authActionClient return roles.includes('employee'); }); - // Send notification emails to all employees - // Send emails in batches of 2 per second to respect rate limit - const BATCH_SIZE = 2; - const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); - - const sendEmailsInBatches = async () => { - for (let i = 0; i < employeeMembers.length; i += BATCH_SIZE) { - const batch = employeeMembers.slice(i, i + BATCH_SIZE); - - await Promise.all( - batch.map(async (employee) => { - if (!employee.user.email) return; - - let notificationType: 'new' | 're-acceptance' | 'updated'; - const wasAlreadySigned = policy.signedBy.includes(employee.id); - if (isNewPolicy) { - notificationType = 'new'; - } else if (wasAlreadySigned) { - notificationType = 're-acceptance'; - } else { - notificationType = 'updated'; - } - - try { - await sendPolicyNotificationEmail({ - email: employee.user.email, - userName: employee.user.name || employee.user.email || 'Employee', - policyName: policy.name, - organizationName: policy.organization.name, - organizationId: session.activeOrganizationId, - notificationType, - }); - } catch (emailError) { - console.error(`Failed to send email to ${employee.user.email}:`, emailError); - // Don't fail the whole operation if email fails - } - }), - ); - - // Only delay if there are more emails to send - if (i + BATCH_SIZE < employeeMembers.length) { - await delay(1000); // wait 1 second between batches - } - } - }; - - // Fire and forget, but log errors if any - 
sendEmailsInBatches().catch((error) => { - console.error('Some emails failed to send:', error); - }); + // Call /api/send-policy-email to send emails to employees + + // Prepare the events array for the API + const events = employeeMembers + .filter((employee) => employee.user.email) + .map((employee) => ({ + subscriberId: `${employee.user.id}-${session.activeOrganizationId}`, + email: employee.user.email, + userName: employee.user.name || employee.user.email || 'Employee', + policyName: policy.name, + organizationName: policy.organization.name, + url: `${process.env.NEXT_PUBLIC_APP_URL ?? 'https://app.trycomp.ai'}/${session.activeOrganizationId}/policies/${policy.id}`, + description: `The "${policy.name}" policy has been ${isNewPolicy ? 'created' : 'updated'}.`, + })); + + // Call the API route to send the emails + try { + await fetch(`${process.env.BETTER_AUTH_URL ?? ''}/api/send-policy-email`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(events), + }); + } catch (error) { + console.error('Failed to call /api/send-policy-email:', error); + // Don't throw, just log + } // If a comment was provided, create a comment if (comment && comment.trim() !== '') { diff --git a/apps/app/src/ai/constants.ts b/apps/app/src/ai/constants.ts deleted file mode 100644 index 22616fc8a..000000000 --- a/apps/app/src/ai/constants.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { type GatewayModelId } from '@ai-sdk/gateway'; - -export enum Models { - AmazonNovaPro = 'amazon/nova-pro', - AnthropicClaude4Sonnet = 'anthropic/claude-4-sonnet', - GoogleGeminiFlash = 'google/gemini-2.5-flash', - MoonshotKimiK2 = 'moonshotai/kimi-k2', - OpenAIGPT5 = 'openai/gpt-5', - OpenAIGPT5Mini = 'openai/gpt-5-mini', - OpenAIGPT4oMini = 'openai/gpt-4o-mini', - XaiGrok3Fast = 'xai/grok-3-fast', -} - -export const DEFAULT_MODEL = Models.OpenAIGPT5Mini; - -export const SUPPORTED_MODELS: GatewayModelId[] = [ - Models.AmazonNovaPro, - Models.AnthropicClaude4Sonnet, - Models.GoogleGeminiFlash, - Models.MoonshotKimiK2, - Models.OpenAIGPT5, - Models.OpenAIGPT5Mini, - Models.OpenAIGPT4oMini, - Models.XaiGrok3Fast, -]; diff --git a/apps/app/src/ai/secrets.ts b/apps/app/src/ai/secrets.ts deleted file mode 100644 index 3c5de21a5..000000000 --- a/apps/app/src/ai/secrets.ts +++ /dev/null @@ -1,98 +0,0 @@ -// Central registry for standard secrets used by automations and tools. -// AI-friendly design: a flat list with simple fields and helper lookups. - -export type SecretProvider = 'github'; - -export interface SecretEntry { - id: string; // stable identifier, e.g. 'github.token' - provider: SecretProvider; - name: string; // short name within provider, e.g. 'token' - envVar: string; // environment variable name, e.g. 
'GITHUB_TOKEN' - description: string; - required: boolean; - docsUrl?: string; - aliases?: readonly string[]; // additional phrases an AI/user might use -} - -export const SECRETS: readonly SecretEntry[] = [ - { - id: 'github.token', - provider: 'github', - name: 'token', - envVar: 'GITHUB_TOKEN', - description: - 'GitHub token (PAT or App installation token) with read access to repository contents and metadata.', - required: true, - docsUrl: - 'https://docs.github.com/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token', - aliases: ['github token', 'gh token', 'github_pat', 'github personal access token'], - }, -] as const; - -// Lightweight indexes for fast lookup -const SECRET_BY_ID: Readonly> = Object.freeze( - Object.fromEntries(SECRETS.map((s) => [s.id, s])), -); - -const SECRET_BY_ENV: Readonly> = Object.freeze( - Object.fromEntries(SECRETS.map((s) => [s.envVar.toUpperCase(), s])), -); - -export function listSecrets(): readonly SecretEntry[] { - return SECRETS; -} - -export function listProviderSecrets(provider: SecretProvider): readonly SecretEntry[] { - return SECRETS.filter((s) => s.provider === provider); -} - -export function getSecretById(id: string): SecretEntry | undefined { - return SECRET_BY_ID[id]; -} - -export function getSecretByEnvVar(envVar: string): SecretEntry | undefined { - return SECRET_BY_ENV[envVar.toUpperCase()]; -} - -export function getEnvVarNameById(id: string): string | undefined { - return getSecretById(id)?.envVar; -} - -// Flexible resolver that accepts: id, env var, provider.name, or an alias phrase -export function resolveSecretIdentifier(identifier: string): SecretEntry | undefined { - const raw = identifier.trim(); - if (!raw) return undefined; - - // Exact id - const byId = getSecretById(raw); - if (byId) return byId; - - // Exact env var - const byEnv = getSecretByEnvVar(raw); - if (byEnv) return byEnv; - - const normalized = raw.toLowerCase().replace(/\s+/g, ' ').trim(); - - // provider.name form - const dotIdx = normalized.indexOf('.'); - if (dotIdx > 0) { - const provider = normalized.slice(0, dotIdx); - const name = normalized.slice(dotIdx + 1); - const match = SECRETS.find( - (s) => s.provider === (provider as SecretProvider) && s.name.toLowerCase() === name, - ); - if (match) return match; - } - - // Alias match - const byAlias = SECRETS.find((s) => - (s.aliases ?? []).some((a) => a.toLowerCase() === normalized), - ); - if (byAlias) return byAlias; - - // Provider-keyword fallback: e.g., 'github token' - const byTokens = SECRETS.find( - (s) => normalized.includes(s.provider) && normalized.includes(s.name.toLowerCase()), - ); - return byTokens; -} diff --git a/apps/app/src/ai/tools/create-sandbox.md b/apps/app/src/ai/tools/create-sandbox.md deleted file mode 100644 index f8cf0075f..000000000 --- a/apps/app/src/ai/tools/create-sandbox.md +++ /dev/null @@ -1,55 +0,0 @@ -Use this tool to create a new Vercel Sandbox — an ephemeral, isolated Linux container that serves as your development environment for the current session. This sandbox provides a secure workspace where you can upload files, install dependencies, run commands, start development servers, and preview web apps. Each sandbox is uniquely identified and must be referenced for all subsequent operations (e.g., file generation, command execution, or URL access). - -## When to Use This Tool - -Use this tool **once per session** when: - -1. You begin working on a new user request that requires code execution or file creation -2. 
No sandbox currently exists for the session -3. The user asks to start a new project, scaffold an application, or test code in a live environment -4. The user requests a fresh or reset environment - -## Sandbox Capabilities - -After creation, the sandbox allows you to: - -- Upload and manage files via `Generate Files` -- Execute shell commands with `Run Command` and `Wait Command` -- Access running servers through public URLs using `Get Sandbox URL` - -Each sandbox mimics a real-world development environment and supports rapid iteration and testing without polluting the local system. The base system is Amazon Linux 2023 with the following additional packages: - -``` -bind-utils bzip2 findutils git gzip iputils libicu libjpeg libpng ncurses-libs openssl openssl-libs pnpm procps tar unzip which whois zstd -``` - -You can install additional packages using the `dnf` package manager. You can NEVER use port 8080 as it is reserved for internal applications. When requested, you need to use a different port. - -## Best Practices - -- Create the sandbox at the beginning of the session or when the user initiates a coding task -- Track and reuse the sandbox ID throughout the session -- Do not create a second sandbox unless explicitly instructed -- If the user requests an environment reset, you may create a new sandbox **after confirming their intent** - -## Examples of When to Use This Tool - - -User: Can we start fresh? I want to rebuild the project from scratch. -Assistant: Got it — I’ll create a new sandbox so we can start clean. -*Calls Create Sandbox* - - -## When NOT to Use This Tool - -Skip using this tool when: - -1. A sandbox has already been created for the current session -2. You only need to upload files (use Generate Files) -3. You want to execute or wait for a command (use Run Command / Wait Command) -4. You want to preview the application (use Get Sandbox URL) -5. The user hasn’t asked to reset the environment - -## Summary - -Use Create Sandbox to initialize a secure, temporary development environment — but **only once per session**. Treat the sandbox as the core workspace for all follow-up actions unless the user explicitly asks to discard and start anew. diff --git a/apps/app/src/ai/tools/create-sandbox.ts b/apps/app/src/ai/tools/create-sandbox.ts deleted file mode 100644 index 64c1ee7a9..000000000 --- a/apps/app/src/ai/tools/create-sandbox.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import type { UIMessage, UIMessageStreamWriter } from 'ai'; -import { tool } from 'ai'; -import z from 'zod/v3'; -import type { DataPart } from '../messages/data-parts'; -import description from './create-sandbox.md'; -import { getRichError } from './get-rich-error'; - -interface Params { - writer: UIMessageStreamWriter>; -} - -export const createSandbox = ({ writer }: Params) => - tool({ - description, - inputSchema: z.object({ - timeout: z - .number() - .min(600000) - .max(2700000) - .optional() - .describe( - 'Maximum time in milliseconds the Vercel Sandbox will remain active before automatically shutting down. Minimum 600000ms (10 minutes), maximum 2700000ms (45 minutes). Defaults to 600000ms (10 minutes). The sandbox will terminate all running processes when this timeout is reached.', - ), - ports: z - .array(z.number()) - .max(2) - .optional() - .describe( - 'Array of network ports to expose and make accessible from outside the Vercel Sandbox. These ports allow web servers, APIs, or other services running inside the Vercel Sandbox to be reached externally. 
Common ports include 3000 (Next.js), 8000 (Python servers), 5000 (Flask), etc.', - ), - }), - execute: async ({ timeout, ports }, { toolCallId }) => { - writer.write({ - id: toolCallId, - type: 'data-create-sandbox', - data: { status: 'loading' }, - }); - - try { - const sandbox = await Sandbox.create({ - timeout: timeout ?? 600000, - ports, - }); - - writer.write({ - id: toolCallId, - type: 'data-create-sandbox', - data: { sandboxId: sandbox.sandboxId, status: 'done' }, - }); - - return ( - `Sandbox created with ID: ${sandbox.sandboxId}.` + - `\nYou can now upload files, run commands, and access services on the exposed ports.` - ); - } catch (error) { - const richError = getRichError({ - action: 'Creating Sandbox', - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-create-sandbox', - data: { - error: { message: richError.error.message }, - status: 'error', - }, - }); - - console.log('Error creating Sandbox:', richError.error); - return richError.message; - } - }, - }); diff --git a/apps/app/src/ai/tools/generate-files.md b/apps/app/src/ai/tools/generate-files.md deleted file mode 100644 index 6734757ad..000000000 --- a/apps/app/src/ai/tools/generate-files.md +++ /dev/null @@ -1,74 +0,0 @@ -Use this tool to generate and upload code files into an existing Vercel Sandbox. It leverages an LLM to create file contents based on the current conversation context and user intent, then writes them directly into the sandbox file system. - -The generated files should be considered correct on first iteration and suitable for immediate use in the sandbox environment. This tool is essential for scaffolding applications, adding new features, writing configuration files, or fixing missing components. - -All file paths must be relative to the sandbox root (e.g., `src/index.ts`, `package.json`, `components/Button.tsx`). - -## When to Use This Tool - -Use Generate Files when: - -1. You need to create one or more new files as part of a feature, scaffold, or fix -2. The user requests code that implies file creation (e.g., new routes, APIs, components, services) -3. You need to bootstrap a new application structure inside a sandbox -4. You’re completing a multi-step task that involves generating or updating source code -5. A prior command failed due to a missing file, and you need to supply it - -## File Generation Guidelines - -- Every file must be complete, valid, and runnable where applicable -- File contents must reflect the user’s intent and the overall session context -- File paths must be well-structured and use consistent naming conventions -- Generated files should assume compatibility with other existing files in the sandbox - -### Lambda (.js) files in `lambdas/` MUST follow this structure - -- Line 1 must be: `module.exports = async (event) => {` -- No code/comments/imports before that line -- No code after the closing `};` -- All requires and helper functions must be inside the exported function -- No `process.env`; use `getSecret` inside the function - -## Best Practices - -- Avoid redundant file generation if the file already exists and is unchanged -- If a file was already created earlier in the session at the same path, do NOT regenerate it unless the user asked to update it. Instead, confirm the existing path and proceed to next steps. -- Do NOT create a new file path to work around the above rule. If content is wrong, fix the existing file at the same path, or ask the user to confirm overwriting. 
-- If you detect incorrect content (e.g., `process.env` usage, export shape), you SHOULD automatically update the existing file to comply with the authoring rules. Do not ask for permission for these safe, corrective edits. -- Use conventional file/folder structures for the tech stack in use -- If replacing an existing file, ensure the update fully satisfies the user’s request - -## Examples of When to Use This Tool - - -User: Add a `NavBar.tsx` component and include it in `App.tsx` -Assistant: I’ll generate the `NavBar.tsx` file and update `App.tsx` to include it. -*Uses Generate Files to create:* -- `components/NavBar.tsx` -- Modified `App.tsx` with import and usage of `NavBar` - - - -User: Let’s scaffold a simple Express server with a `/ping` route. -Assistant: I’ll generate the necessary files to start the Express app. -*Uses Generate Files to create:* -- `package.json` with Express as a dependency -- `index.js` with basic server and `/ping` route - - -## When NOT to Use This Tool - -Avoid using this tool when: - -1. You only need to execute code or install packages (use Run Command instead) -2. You’re waiting for a command to finish (use Wait Command) -3. You want to preview a running server or UI (use Get Sandbox URL) -4. You haven’t created a sandbox yet (use Create Sandbox first) - -## Output Behavior - -After generation, the tool will return a list of the files created, including their paths and contents. These can then be inspected, referenced, or used in subsequent commands. - -## Summary - -Use Generate Files to programmatically create or update files in your Vercel Sandbox. It enables fast iteration, contextual coding, and dynamic file management — all driven by user intent and conversation context. diff --git a/apps/app/src/ai/tools/generate-files.ts b/apps/app/src/ai/tools/generate-files.ts deleted file mode 100644 index a41642ab9..000000000 --- a/apps/app/src/ai/tools/generate-files.ts +++ /dev/null @@ -1,105 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import type { UIMessage, UIMessageStreamWriter } from 'ai'; -import { tool } from 'ai'; -import z from 'zod/v3'; -import type { DataPart } from '../messages/data-parts'; -import description from './generate-files.md'; -import { getContents, type File } from './generate-files/get-contents'; -import { getWriteFiles } from './generate-files/get-write-files'; -import { getRichError } from './get-rich-error'; - -interface Params { - modelId: string; - writer: UIMessageStreamWriter>; -} - -export const generateFiles = ({ writer, modelId }: Params) => - tool({ - description, - inputSchema: z.object({ - sandboxId: z.string(), - paths: z.array(z.string()), - }), - execute: async ({ sandboxId, paths }, { toolCallId, messages }) => { - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { paths: [], status: 'generating' }, - }); - - let sandbox: Sandbox | null = null; - - try { - sandbox = await Sandbox.get({ sandboxId }); - } catch (error) { - const richError = getRichError({ - action: 'get sandbox by id', - args: { sandboxId }, - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { error: richError.error, paths: [], status: 'error' }, - }); - - return richError.message; - } - - const writeFiles = getWriteFiles({ sandbox, toolCallId, writer }); - const iterator = getContents({ messages, modelId, paths }); - const uploaded: File[] = []; - - try { - for await (const chunk of iterator) { - if (chunk.files.length > 0) { - const error = await writeFiles(chunk); - if 
(error) { - return error; - } else { - uploaded.push(...chunk.files); - } - } else { - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { - status: 'generating', - paths: chunk.paths, - }, - }); - } - } - } catch (error) { - const richError = getRichError({ - action: 'generate file contents', - args: { modelId, paths }, - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { - error: richError.error, - status: 'error', - paths, - }, - }); - - return richError.message; - } - - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { paths: uploaded.map((file) => file.path), status: 'done' }, - }); - - return `Successfully generated and uploaded ${ - uploaded.length - } files. Their paths and contents are as follows: - ${uploaded.map((file) => `Path: ${file.path}\nContent: ${file.content}\n`).join('\n')}`; - }, - }); diff --git a/apps/app/src/ai/tools/generate-files/get-write-files.ts b/apps/app/src/ai/tools/generate-files/get-write-files.ts deleted file mode 100644 index ac447c945..000000000 --- a/apps/app/src/ai/tools/generate-files/get-write-files.ts +++ /dev/null @@ -1,59 +0,0 @@ -import type { DataPart } from '../../messages/data-parts' -import type { File } from './get-contents' -import type { Sandbox } from '@vercel/sandbox' -import type { UIMessageStreamWriter, UIMessage } from 'ai' -import { getRichError } from '../get-rich-error' - -interface Params { - sandbox: Sandbox - toolCallId: string - writer: UIMessageStreamWriter> -} - -export function getWriteFiles({ sandbox, toolCallId, writer }: Params) { - return async function writeFiles(params: { - written: string[] - files: File[] - paths: string[] - }) { - const paths = params.written.concat(params.files.map((file) => file.path)) - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { paths, status: 'uploading' }, - }) - - try { - await sandbox.writeFiles( - params.files.map((file) => ({ - content: Buffer.from(file.content, 'utf8'), - path: file.path, - })) - ) - } catch (error) { - const richError = getRichError({ - action: 'write files to sandbox', - args: params, - error, - }) - - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { - error: richError.error, - status: 'error', - paths: params.paths, - }, - }) - - return richError.message - } - - writer.write({ - id: toolCallId, - type: 'data-generating-files', - data: { paths, status: 'uploaded' }, - }) - } -} diff --git a/apps/app/src/ai/tools/get-rich-error.ts b/apps/app/src/ai/tools/get-rich-error.ts deleted file mode 100644 index 354313143..000000000 --- a/apps/app/src/ai/tools/get-rich-error.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { APIError } from '@vercel/sandbox/dist/api-client/api-error' - -interface Params { - args?: Record - action: string - error: unknown -} - -/** - * Allows to parse a thrown error to check its metadata and construct a rich - * message that can be handed to the LLM. 
- */ -export function getRichError({ action, args, error }: Params) { - const fields = getErrorFields(error) - let message = `Error during ${action}: ${fields.message}` - if (args) message += `\nParameters: ${JSON.stringify(args, null, 2)}` - if (fields.json) message += `\nJSON: ${JSON.stringify(fields.json, null, 2)}` - if (fields.text) message += `\nText: ${fields.text}` - return { - message: message, - error: fields, - } -} - -function getErrorFields(error: unknown) { - if (!(error instanceof Error)) { - return { - message: String(error), - json: error, - } - } else if (error instanceof APIError) { - return { - message: error.message, - json: error.json, - text: error.text, - } - } else { - return { - message: error.message, - json: error, - } - } -} diff --git a/apps/app/src/ai/tools/get-sandbox-url.md b/apps/app/src/ai/tools/get-sandbox-url.md deleted file mode 100644 index 507685567..000000000 --- a/apps/app/src/ai/tools/get-sandbox-url.md +++ /dev/null @@ -1,52 +0,0 @@ -Use this tool to retrieve a publicly accessible URL for a specific port that was exposed during the creation of a Vercel Sandbox. This allows users (and the assistant) to preview web applications, access APIs, or interact with services running inside the sandbox via HTTP. - -⚠️ The requested port must have been explicitly declared when the sandbox was created. If the port was not exposed at sandbox creation time, this tool will NOT work for that port. - -## When to Use This Tool - -Use Get Sandbox URL when: - -1. A service or web server is running on a port that was exposed during sandbox creation -2. You need to share a live preview link with the user -3. You want to access a running server inside the sandbox via HTTP -4. You need to programmatically test or call an internal endpoint running in the sandbox - -## Critical Requirements - -- The port must have been **explicitly exposed** in the `Create Sandbox` step - - Example: `ports: [3000]` -- The command serving on that port must be actively running - - Use `Run Command` followed by `Wait Command` (if needed) to start the server - -## Best Practices - -- Only call this tool after the server process has successfully started -- Use typical ports based on framework defaults (e.g., 3000 for Next.js, 5173 for Vite, 8080 for Node APIs) -- If multiple services run on different ports, ensure each port was exposed up front during sandbox creation -- Don’t attempt to expose or discover ports dynamically after creation — only predefined ports are valid - -## When NOT to Use This Tool - -Avoid using this tool when: - -1. The port was **not declared** during sandbox creation — it will not be accessible -2. No server is running on the specified port -3. You haven't started the service yet or haven't waited for it to boot up -4. You are referencing a transient script or CLI command (not a persistent server) - -## Example - - -User: Can I preview the app after it's built? -Assistant: -1. Create Sandbox: expose port 3000 -2. Generate Files: scaffold the app -3. Run Command: `npm run dev` -4. (Optional) Wait Command -5. Get Sandbox URL: port 3000 -→ Returns: a public URL the user can open in a browser - - -## Summary - -Use Get Sandbox URL to access live previews of services running inside the sandbox — but only for ports that were explicitly exposed during sandbox creation. If the port wasn’t declared, it will not be accessible externally. 
diff --git a/apps/app/src/ai/tools/get-sandbox-url.ts b/apps/app/src/ai/tools/get-sandbox-url.ts deleted file mode 100644 index a809dfe11..000000000 --- a/apps/app/src/ai/tools/get-sandbox-url.ts +++ /dev/null @@ -1,45 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import type { UIMessage, UIMessageStreamWriter } from 'ai'; -import { tool } from 'ai'; -import z from 'zod/v3'; -import type { DataPart } from '../messages/data-parts'; -import description from './get-sandbox-url.md'; - -interface Params { - writer: UIMessageStreamWriter>; -} - -export const getSandboxURL = ({ writer }: Params) => - tool({ - description, - inputSchema: z.object({ - sandboxId: z - .string() - .describe( - "The unique identifier of the Vercel Sandbox (e.g., 'sbx_abc123xyz'). This ID is returned when creating a Vercel Sandbox and is used to reference the specific sandbox instance.", - ), - port: z - .number() - .describe( - 'The port number where a service is running inside the Vercel Sandbox (e.g., 3000 for Next.js dev server, 8000 for Python apps, 5000 for Flask). The port must have been exposed when the sandbox was created or when running commands.', - ), - }), - execute: async ({ sandboxId, port }, { toolCallId }) => { - writer.write({ - id: toolCallId, - type: 'data-get-sandbox-url', - data: { status: 'loading' }, - }); - - const sandbox = await Sandbox.get({ sandboxId }); - const url = sandbox.domain(port); - - writer.write({ - id: toolCallId, - type: 'data-get-sandbox-url', - data: { url, status: 'done' }, - }); - - return { url }; - }, - }); diff --git a/apps/app/src/ai/tools/index.ts b/apps/app/src/ai/tools/index.ts deleted file mode 100644 index cf1f23bb3..000000000 --- a/apps/app/src/ai/tools/index.ts +++ /dev/null @@ -1,24 +0,0 @@ -import type { InferUITools, UIMessage, UIMessageStreamWriter } from 'ai'; -import type { DataPart } from '../messages/data-parts'; -import { createSandbox } from './create-sandbox'; -import { generateFiles } from './generate-files'; -import { getSandboxURL } from './get-sandbox-url'; -import { runCommand } from './run-command'; -import { storeToS3 } from './store-to-s3'; - -interface Params { - modelId: string; - writer: UIMessageStreamWriter>; -} - -export function tools({ modelId, writer }: Params) { - return { - createSandbox: createSandbox({ writer }), - generateFiles: generateFiles({ writer, modelId }), - getSandboxURL: getSandboxURL({ writer }), - runCommand: runCommand({ writer }), - storeToS3: storeToS3({ writer }), - }; -} - -export type ToolSet = InferUITools>; diff --git a/apps/app/src/ai/tools/run-command.md b/apps/app/src/ai/tools/run-command.md deleted file mode 100644 index 2dafae2ed..000000000 --- a/apps/app/src/ai/tools/run-command.md +++ /dev/null @@ -1,67 +0,0 @@ -Use this tool to run a command inside an existing Vercel Sandbox. You can choose whether the command should block until completion or run in the background by setting the `wait` parameter: - -- `wait: true` → Command runs and **must complete** before the response is returned. -- `wait: false` → Command starts in the background, and the response returns immediately with its `commandId`. - -⚠️ Commands are stateless — each one runs in a fresh shell session with **no memory** of previous commands. You CANNOT rely on `cd`, but other state like shell exports or background processes from prior commands should be available. - -## When to Use This Tool - -Use Run Command when: - -1. You need to install dependencies (e.g., `pnpm install`) -2. 
You want to run a build or test process (e.g., `pnpm build`, `vite build`) -3. You need to launch a development server or long-running process -4. You need to compile or execute code within the sandbox -5. You want to run a task in the background without blocking the session - -## Sequencing Rules - -- If two commands depend on each other, **set `wait: true` on the first** to ensure it finishes before starting the second - - ✅ Good: Run `pnpm install` with `wait: true` → then run `pnpm dev` - - ❌ Bad: Run both with `wait: false` and expect them to be sequential -- Do **not** issue multiple sequential commands in one call - - ❌ `cd src && node index.js` - - ✅ `node src/index.js` -- Do **not** assume directory state is preserved — use full relative paths - -## Command Format - -- Separate the base command from its arguments - - ✅ `{ command: "pnpm", args: ["install", "--verbose"], wait: true }` - - ❌ `{ command: "pnpm install --verbose" }` -- Avoid shell syntax like pipes, redirections, or `&&`. If unavoidable, ensure it works in a stateless, single-session execution - -## When to Set `wait` to True - -- The next step depends on the result of the command -- The command must finish before accessing its output -- Example: Installing dependencies before building, compiling before running tests - -## When to Set `wait` to False - -- The command is intended to stay running indefinitely (e.g., a dev server) -- The command has no impact on subsequent operations (e.g., printing logs) - -## Other Rules - -- When running `pnpm dev` in a Next.js or Vite project, HMR can handle updates so generally you don't need to kill the server process and start it again after changing files. - -## Examples - - -User: Install dependencies and then run the dev server -Assistant: -1. Run Command: `{ command: "pnpm", args: ["install"], wait: true }` -2. Run Command: `{ command: "pnpm", args: ["run", "dev"], wait: false }` - - - -User: Build the app with Vite -Assistant: -Run Command: `{ command: "vite", args: ["build"], wait: true }` - - -## Summary - -Use Run Command to start shell commands in the sandbox, controlling execution flow with the `wait` flag. Commands are stateless and isolated — use relative paths, and only run long-lived processes with `wait: false`. diff --git a/apps/app/src/ai/tools/run-command.ts b/apps/app/src/ai/tools/run-command.ts deleted file mode 100644 index 7f12cc1ca..000000000 --- a/apps/app/src/ai/tools/run-command.ts +++ /dev/null @@ -1,193 +0,0 @@ -import { Command, Sandbox } from '@vercel/sandbox'; -import type { UIMessage, UIMessageStreamWriter } from 'ai'; -import { tool } from 'ai'; -import z from 'zod/v3'; -import type { DataPart } from '../messages/data-parts'; -import { getRichError } from './get-rich-error'; -import description from './run-command.md'; - -interface Params { - writer: UIMessageStreamWriter>; -} - -export const runCommand = ({ writer }: Params) => - tool({ - description, - inputSchema: z.object({ - sandboxId: z.string().describe('The ID of the Vercel Sandbox to run the command in'), - command: z - .string() - .describe( - "The base command to run (e.g., 'npm', 'node', 'python', 'ls', 'cat'). Do NOT include arguments here. IMPORTANT: Each command runs independently in a fresh shell session - there is no persistent state between commands. You cannot use 'cd' to change directories for subsequent commands.", - ), - args: z - .array(z.string()) - .optional() - .describe( - "Array of arguments for the command. 
Each argument should be a separate string (e.g., ['install', '--verbose'] for npm install --verbose, or ['src/index.js'] to run a file, or ['-la', './src'] to list files). IMPORTANT: Use relative paths (e.g., 'src/file.js') or absolute paths instead of trying to change directories with 'cd' first, since each command runs in a fresh shell session.", - ), - sudo: z.boolean().optional().describe('Whether to run the command with sudo'), - wait: z - .boolean() - .describe( - 'Whether to wait for the command to finish before returning. If true, the command will block until it completes, and you will receive its output.', - ), - }), - execute: async ({ sandboxId, command, sudo, wait, args = [] }, { toolCallId }) => { - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { sandboxId, command, args, status: 'executing' }, - }); - - let sandbox: Sandbox | null = null; - - try { - sandbox = await Sandbox.get({ sandboxId }); - } catch (error) { - const richError = getRichError({ - action: 'get sandbox by id', - args: { sandboxId }, - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - command, - args, - error: richError.error, - status: 'error', - }, - }); - - return richError.message; - } - - let cmd: Command | null = null; - - try { - cmd = await sandbox.runCommand({ - detached: true, - cmd: command, - args, - sudo, - }); - } catch (error) { - const richError = getRichError({ - action: 'run command in sandbox', - args: { sandboxId }, - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - command, - args, - error: richError.error, - status: 'error', - }, - }); - - return richError.message; - } - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - commandId: cmd.cmdId, - command, - args, - status: 'executing', - }, - }); - - if (!wait) { - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - commandId: cmd.cmdId, - command, - args, - status: 'running', - }, - }); - - return `The command \`${command} ${args.join( - ' ', - )}\` has been started in the background in the sandbox with ID \`${sandboxId}\` with the commandId ${ - cmd.cmdId - }.`; - } - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - commandId: cmd.cmdId, - command, - args, - status: 'waiting', - }, - }); - - const done = await cmd.wait(); - try { - const [stdout, stderr] = await Promise.all([done.stdout(), done.stderr()]); - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - commandId: cmd.cmdId, - command, - args, - exitCode: done.exitCode, - status: 'done', - }, - }); - - return ( - `The command \`${command} ${args.join( - ' ', - )}\` has finished with exit code ${done.exitCode}.` + - `Stdout of the command was: \n` + - `\`\`\`\n${stdout}\n\`\`\`\n` + - `Stderr of the command was: \n` + - `\`\`\`\n${stderr}\n\`\`\`` - ); - } catch (error) { - const richError = getRichError({ - action: 'wait for command to finish', - args: { sandboxId, commandId: cmd.cmdId }, - error, - }); - - writer.write({ - id: toolCallId, - type: 'data-run-command', - data: { - sandboxId, - commandId: cmd.cmdId, - command, - args, - error: richError.error, - status: 'error', - }, - }); - - return richError.message; - } - }, - }); diff --git a/apps/app/src/app/(app)/[orgId]/controls/[controlId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/controls/[controlId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- 
a/apps/app/src/app/(app)/[orgId]/controls/[controlId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/controls/[controlId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/controls/loading.tsx b/apps/app/src/app/(app)/[orgId]/controls/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/controls/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/controls/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/requirements/[requirementKey]/loading.tsx b/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/requirements/[requirementKey]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/requirements/[requirementKey]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/frameworks/[frameworkInstanceId]/requirements/[requirementKey]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/frameworks/loading.tsx b/apps/app/src/app/(app)/[orgId]/frameworks/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/frameworks/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/frameworks/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/integrations/loading.tsx b/apps/app/src/app/(app)/[orgId]/integrations/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/integrations/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/integrations/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/people/[employeeId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/people/[employeeId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/people/[employeeId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/people/[employeeId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/people/all/loading.tsx b/apps/app/src/app/(app)/[orgId]/people/all/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/people/all/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/people/all/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/people/dashboard/loading.tsx b/apps/app/src/app/(app)/[orgId]/people/dashboard/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/people/dashboard/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/people/dashboard/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/people/devices/loading.tsx b/apps/app/src/app/(app)/[orgId]/people/devices/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/people/devices/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/people/devices/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/policies/(overview)/loading.tsx b/apps/app/src/app/(app)/[orgId]/policies/(overview)/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/policies/(overview)/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/policies/(overview)/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/PolicyOverview.tsx b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/PolicyOverview.tsx index 693ccbc89..2c20745e9 100644 --- a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/PolicyOverview.tsx +++ b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/PolicyOverview.tsx @@ -37,7 +37,6 @@ export function PolicyOverview({ isPendingApproval: boolean; }) { const { data: activeMember } = authClient.useActiveMember(); - const [, setOpen] = useQueryState('policy-overview-sheet'); const [, setArchiveOpen] = useQueryState('archive-policy-sheet'); const canCurrentUserApprove = policy?.approverId === activeMember?.id; diff --git a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/UpdatePolicyOverview.tsx b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/UpdatePolicyOverview.tsx index 197c0460f..a83c9b69d 100644 --- a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/UpdatePolicyOverview.tsx +++ b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/components/UpdatePolicyOverview.tsx @@ -5,16 +5,14 @@ import { updatePolicyFormAction } from '@/actions/policies/update-policy-form-ac import { SelectAssignee } from '@/components/SelectAssignee'; import { StatusIndicator } from '@/components/status-indicator'; import { Button } from '@comp/ui/button'; -import { Calendar } from '@comp/ui/calendar'; import { cn } from '@comp/ui/cn'; -import { Popover, PopoverContent, PopoverTrigger } from '@comp/ui/popover'; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@comp/ui/select'; import { Departments, Frequency, Member, type Policy, PolicyStatus, User } from '@db'; import { format } from 'date-fns'; import { CalendarIcon, Loader2 } from 'lucide-react'; import { useAction } from 'next-safe-action/hooks'; import { useRouter } from 'next/navigation'; -import { useRef, useState } from 'react'; +import { useState } from 'react'; import { toast } from 'sonner'; import { SubmitApprovalDialog } from './SubmitApprovalDialog'; @@ -42,11 +40,6 @@ export function UpdatePolicyOverview({ // Track selected assignee const [selectedAssigneeId, setSelectedAssigneeId] = useState(policy.assigneeId); - // Date picker state - UI only - const [isDatePickerOpen, setIsDatePickerOpen] = useState(false); - const [tempDate, setTempDate] = useState(undefined); - const popoverRef = useRef(null); - // Loading state const [isSubmitting, setIsSubmitting] = useState(false); @@ -83,12 +76,6 @@ export function UpdatePolicyOverview({ }, }); - // Function to handle date confirmation - const handleDateConfirm = (date: Date | undefined) => { - setTempDate(date); - setIsDatePickerOpen(false); - }; - // Function to handle form field changes const handleFormChange = () => { setFormInteracted(true); @@ -106,7 +93,7 @@ export function UpdatePolicyOverview({ const reviewFrequency = formData.get('review_frequency') as Frequency; // Get review date from the form or use the existing one - const reviewDate = tempDate || (policy.reviewDate ? new Date(policy.reviewDate) : new Date()); + const reviewDate = policy.reviewDate ? 
new Date(policy.reviewDate) : new Date(); // Check if the policy is published and if there are changes const isPublishedWithChanges = @@ -150,7 +137,7 @@ export function UpdatePolicyOverview({ const reviewFrequency = formData.get('review_frequency') as Frequency; // Get review date from the form or use the existing one - const reviewDate = tempDate || (policy.reviewDate ? new Date(policy.reviewDate) : new Date()); + const reviewDate = policy.reviewDate ? new Date(policy.reviewDate) : new Date(); setIsSubmitting(true); submitForApproval.execute({ @@ -285,82 +272,32 @@ export function UpdatePolicyOverview({ - { - setIsDatePickerOpen(open); - if (!open) { - setTempDate(undefined); - } - }} - > - + - - - -
- { - setTempDate(date); - handleFormChange(); - }} - disabled={(date) => date <= new Date()} - initialFocus - /> -
- - -
-
-
-
+ {policy.reviewDate ? ( + format(new Date(policy.reviewDate), 'PPP') + ) : ( + None + )} + + + {/* Hidden input to store the date value */} diff --git a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/policies/[policyId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/policies/[policyId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/policies/all/loading.tsx b/apps/app/src/app/(app)/[orgId]/policies/all/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/policies/all/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/policies/all/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/risk/(overview)/loading.tsx b/apps/app/src/app/(app)/[orgId]/risk/(overview)/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/risk/(overview)/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/risk/(overview)/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/risk/[riskId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/risk/[riskId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/risk/[riskId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/risk/[riskId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/settings/api-keys/loading.tsx b/apps/app/src/app/(app)/[orgId]/settings/api-keys/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/settings/api-keys/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/settings/api-keys/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/settings/context-hub/loading.tsx b/apps/app/src/app/(app)/[orgId]/settings/context-hub/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/settings/context-hub/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/settings/context-hub/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/settings/loading.tsx b/apps/app/src/app/(app)/[orgId]/settings/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/settings/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/settings/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/settings/trust-portal/loading.tsx b/apps/app/src/app/(app)/[orgId]/settings/trust-portal/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/settings/trust-portal/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/settings/trust-portal/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+    <div>
+      <Loader />
+    </div>
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/README.md b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/README.md deleted file mode 100644 index 1ef6a2f1e..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/README.md +++ /dev/null @@ -1,179 +0,0 @@ -# Task Automation System - -A comprehensive system for creating, managing, and executing automated task scripts using AI assistance. - -## Overview - -The Task Automation System allows users to: - -- Chat with AI to generate automation scripts -- Store scripts directly in S3 (no sandboxes needed) -- Execute scripts via Trigger.dev -- Visualize automation workflows -- Test scripts with real-time feedback - -## Architecture - -### Directory Structure - -``` -automation/ -├── components/ # UI Components -│ ├── automation/ # Automation-specific components -│ │ └── AutomationTester.tsx -│ ├── workflow/ # Workflow visualization -│ │ └── workflow-visualizer-simple.tsx -│ └── [other components] -├── hooks/ # Custom React Hooks -│ ├── use-task-automation-script.ts -│ ├── use-task-automation-scripts-list.ts -│ ├── use-task-automation-execution.ts -│ └── use-task-automation-workflow.ts -├── lib/ # Core Libraries -│ ├── types/ # TypeScript definitions -│ ├── task-automation-api.ts # API client -│ ├── task-automation-store.ts # Zustand state management -│ └── chat-context.tsx # Chat context provider -├── chat.tsx # Main chat interface -├── page.tsx # Main page component -└── README.md # This file -``` - -### Key Components - -#### 1. **Chat Interface** (`chat.tsx`) - -- AI-powered chat for generating automation scripts -- Uses actual `orgId` and `taskId` (not test constants) -- Sends context to AI including available secrets - -#### 2. **Automation Tester** (`components/automation/AutomationTester.tsx`) - -- Lists all scripts for an organization -- Allows testing scripts with one click -- Shows execution results and logs - -#### 3. **Workflow Visualizer** (`components/workflow/workflow-visualizer-simple.tsx`) - -- Visualizes automation steps -- Parses scripts to extract workflow -- Shows test results in a dialog - -#### 4. **Script Initializer** (`script-initializer.tsx`) - -- Checks for existing scripts on load -- Updates UI state automatically - -### API Routes - -#### `/api/tasks-automations/chat` - -- Handles AI chat interactions -- Uses limited tool set (only `storeToS3`) -- Receives actual `orgId` and `taskId` from frontend - -#### `/api/tasks-automations/s3/*` - -- `/get` - Fetch script content -- `/list` - List organization scripts -- `/upload` - Upload new scripts - -#### `/api/tasks-automations/trigger/execute` - -- Executes scripts via Trigger.dev -- Returns results and logs - -### State Management - -Uses Zustand for global state: - -- `chatStatus` - Current chat state -- `scriptGenerated` - Whether a script exists -- `scriptPath` - S3 path of the script - -### Custom Hooks - -All hooks follow the `useTaskAutomation*` naming convention: - -1. **`useTaskAutomationScript`** - - Fetches individual scripts - - Handles script uploads - - SWR caching - -2. **`useTaskAutomationScriptsList`** - - Lists all scripts for an org - - Auto-refresh capability - -3. **`useTaskAutomationExecution`** - - Executes scripts - - Manages execution state - - Error handling - -4. **`useTaskAutomationWorkflow`** - - Analyzes scripts for workflow steps - - Client-side parsing (can be enhanced with AI) - -## Data Flow - -1. User chats with AI in the chat interface -2. 
AI generates script and saves directly to S3 using `storeToS3` tool -3. UI detects S3 upload via data mapper and updates state -4. Workflow visualizer fetches and analyzes the script -5. User can test the script, which executes via Trigger.dev -6. Results are displayed in the UI - -## Key Features - -### No Sandboxes - -- Scripts are saved directly to S3 -- No Vercel Sandbox creation or file generation -- Simpler, more direct workflow - -### Real Organization Data - -- Uses actual `orgId` and `taskId` from route -- No hardcoded test constants -- Scripts are properly scoped to organizations - -### Professional Code Organization - -- Consistent naming conventions -- Comprehensive TypeScript types -- JSDoc comments on all public APIs -- Clean separation of concerns - -## Usage - -```tsx -// The main automation page receives orgId and taskId from route params - - - -``` - -## Environment Variables - -Required: - -- `ANTHROPIC_API_KEY` or `OPENAI_API_KEY` - For AI chat -- `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` - For S3 access -- `TRIGGER_API_KEY` - For Trigger.dev execution - -## Future Enhancements - -1. **AI-Powered Workflow Analysis** - - Replace client-side parsing with AI analysis - - More accurate workflow extraction - -2. **Script Templates** - - Pre-built automation templates - - Quick start options - -3. **Execution History** - - Track all script executions - - Performance metrics - -4. **Collaborative Features** - - Share scripts between team members - - Version control integration diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions.ts deleted file mode 100644 index 3ffa9d787..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions.ts +++ /dev/null @@ -1,18 +0,0 @@ -'use server' - -import { revalidatePath } from 'next/cache' -import { cookies } from 'next/headers' -import ms from 'ms' - -export async function hideBanner() { - const store = await cookies() - - store.set('banner-hidden', 'true', { - httpOnly: true, - secure: process.env.NODE_ENV === 'production', - expires: new Date(Date.now() + ms('30d')), - path: '/', - }) - - revalidatePath('/', 'layout') -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions/task-automation-actions.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions/task-automation-actions.ts new file mode 100644 index 000000000..7af234d9c --- /dev/null +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/actions/task-automation-actions.ts @@ -0,0 +1,265 @@ +'use server'; + +/** + * Server actions for task automation + * These actions securely call the enterprise API with server-side license key + */ + +import { revalidatePath } from 'next/cache'; +import { headers } from 'next/headers'; + +interface EnterpriseApiResponse { + success: boolean; + data?: T; + error?: string; + message?: string; +} + +class EnterpriseApiError extends Error { + constructor( + message: string, + public status?: number, + ) { + super(message); + this.name = 'EnterpriseApiError'; + } +} + +/** + * Get enterprise API configuration + */ +function getEnterpriseConfig() { + const enterpriseApiUrl = process.env.NEXT_PUBLIC_ENTERPRISE_API_URL || 'http://localhost:3006'; + const enterpriseApiKey = process.env.ENTERPRISE_API_SECRET; + + if (!enterpriseApiKey) { + throw new Error('Not authorized to access enterprise API'); + } + + return { enterpriseApiUrl, enterpriseApiKey }; +} + +/** + * Make authenticated request to 
enterprise API
+ */
+async function callEnterpriseApi<T>(
+  endpoint: string,
+  options: {
+    method?: 'GET' | 'POST';
+    body?: any;
+    params?: Record<string, string>;
+  } = {},
+): Promise<T> {
+  const { enterpriseApiUrl, enterpriseApiKey } = getEnterpriseConfig();
+
+  const url = new URL(endpoint, enterpriseApiUrl);
+
+  if (options.params) {
+    Object.entries(options.params).forEach(([key, value]) => {
+      url.searchParams.append(key, value);
+    });
+  }
+
+  const method = options.method || 'GET';
+
+  const response = await fetch(url.toString(), {
+    method,
+    headers: {
+      'Content-Type': 'application/json',
+      'x-api-secret': enterpriseApiKey,
+    },
+    body: options.body ? JSON.stringify(options.body) : undefined,
+  });
+
+  if (!response.ok) {
+    let errorMessage = `API request failed: ${response.status}`;
+    try {
+      const errorData = await response.json();
+      errorMessage = errorData.message || errorData.error || errorMessage;
+    } catch {}
+    throw new EnterpriseApiError(errorMessage, response.status);
+  }
+
+  const result: EnterpriseApiResponse = await response.json();
+
+  if (!result.success && result.error) {
+    throw new EnterpriseApiError(result.error);
+  }
+
+  return result.data || (result as T);
+}
+
+/**
+ * Revalidate current path
+ */
+async function revalidateCurrentPath() {
+  const headersList = await headers();
+  let path = headersList.get('x-pathname') || headersList.get('referer') || '';
+  path = path.replace(/\/[a-z]{2}\//, '/');
+  revalidatePath(path);
+}
+
+/**
+ * Upload automation script
+ */
+export async function uploadAutomationScript(data: {
+  orgId: string;
+  taskId: string;
+  content: string;
+  type?: string;
+}) {
+  try {
+    const result = await callEnterpriseApi('/api/tasks-automations/s3/upload', {
+      method: 'POST',
+      body: data,
+    });
+
+    await revalidateCurrentPath();
+    return { success: true, data: result };
+  } catch (error) {
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ? error.message : 'Failed to upload script',
+    };
+  }
+}
+
+/**
+ * Get automation script
+ */
+export async function getAutomationScript(key: string) {
+  try {
+    const result = await callEnterpriseApi('/api/tasks-automations/s3/get', {
+      params: { key },
+    });
+
+    return { success: true, data: result };
+  } catch (error) {
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ? error.message : 'Failed to get script',
+    };
+  }
+}
+
+/**
+ * List automation scripts
+ */
+export async function listAutomationScripts(orgId: string) {
+  try {
+    const result = await callEnterpriseApi('/api/tasks-automations/s3/list', {
+      params: { orgId },
+    });
+
+    return { success: true, data: result };
+  } catch (error) {
+    const typedError = error as EnterpriseApiError;
+
+    if (typedError.status === 401) {
+      return {
+        success: false,
+        error: 'Unauthorized. Please contact your administrator.',
+      };
+    }
+
+    if (typedError.status === 404) {
+      return {
+        success: false,
+        error: 'Files not found.',
+      };
+    }
+
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ? error.message : 'Failed to list scripts',
+    };
+  }
+}
+
+/**
+ * Execute automation script
+ */
+export async function executeAutomationScript(data: {
+  orgId: string;
+  taskId: string;
+  sandboxId?: string;
+}) {
+  try {
+    const result = await callEnterpriseApi('/api/tasks-automations/trigger/execute', {
+      method: 'POST',
+      body: data,
+    });
+
+    await revalidateCurrentPath();
+    return { success: true, data: result };
+  } catch (error) {
+    const typedError = error as EnterpriseApiError;
+
+    if (typedError.status === 401) {
+      return {
+        success: false,
+        error: 'Unauthorized. Please contact your administrator.',
+      };
+    }
+
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ? error.message : 'Failed to execute script',
+    };
+  }
+}
+
+/**
+ * Analyze workflow
+ */
+export async function analyzeAutomationWorkflow(scriptContent: string) {
+  try {
+    const result = await callEnterpriseApi('/api/tasks-automations/workflow/analyze', {
+      method: 'POST',
+      body: { scriptContent },
+    });
+
+    return { success: true, data: result };
+  } catch (error) {
+    const typedError = error as EnterpriseApiError;
+
+    if (typedError.status === 401) {
+      return {
+        success: false,
+        error: 'Unauthorized. Please contact your administrator.',
+      };
+    }
+
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ? error.message : 'Failed to analyze workflow',
+    };
+  }
+}
+
+export const getAutomationRunStatus = async (runId: string) => {
+  try {
+    const result = await callEnterpriseApi(`/api/tasks-automations/runs/${runId}`, {
+      params: { runId },
+    });
+
+    return {
+      success: true,
+      data: result,
+    };
+  } catch (error) {
+    const typedError = error as EnterpriseApiError;
+
+    if (typedError.status === 401) {
+      return {
+        success: false,
+        error: 'Unauthorized. Please contact your administrator.',
+      };
+    }
+
+    return {
+      success: false,
+      error: error instanceof EnterpriseApiError ?
error.message : 'Failed to get run status', + }; + } +}; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/chat.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/chat.tsx index 9b7cd3ee2..8e4849449 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/chat.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/chat.tsx @@ -1,11 +1,16 @@ 'use client'; -import { Models } from '@/ai/constants'; -import { Github, VercelIcon } from '@/components/ai/icons'; import { cn } from '@/lib/utils'; import { useChat } from '@ai-sdk/react'; +import { + Breadcrumb, + BreadcrumbItem, + BreadcrumbLink, + BreadcrumbList, + BreadcrumbSeparator, +} from '@comp/ui/breadcrumb'; import { Card, CardDescription, CardHeader } from '@comp/ui/card'; -import { ArrowLeft, Cloud, Globe, MessageCircleIcon } from 'lucide-react'; +import { ChevronRight } from 'lucide-react'; import Image from 'next/image'; import Link from 'next/link'; import { useCallback, useEffect, useRef, useState } from 'react'; @@ -26,58 +31,44 @@ interface Props { modelId?: string; orgId: string; taskId: string; + taskName?: string; } -type Category = 'recommended' | 'github' | 'website' | 'vercel' | 'cloudflare'; -const AUTOMATION_CATEGORIES: Category[] = [ - 'recommended', - 'github', - 'website', - 'vercel', - 'cloudflare', -]; - interface Example { title: string; prompt: string; - icon: React.ReactNode; - categories: Category[]; + url: string; } const AUTOMATION_EXAMPLES: Example[] = [ { title: 'Check if I have dependabot enabled in my GitHub repository', prompt: 'Check if I have dependabot enabled in my GitHub repository', - icon: , - categories: ['recommended', 'github'], + url: 'https://img.logo.dev/github.com?token=pk_AZatYxV5QDSfWpRDaBxzRQ', }, { title: 'Check if I have branch protection enabled for the main branch in my GitHub repository', prompt: 'Check if I have branch protection enabled for the main branch in my GitHub repository', - icon: , - categories: ['recommended', 'github'], + url: 'https://img.logo.dev/github.com?token=pk_AZatYxV5QDSfWpRDaBxzRQ', }, { title: 'Check if my website has a privacy policy', prompt: 'Check if my website has a privacy policy', - icon: , - categories: ['recommended', 'website'], + url: 'https://img.logo.dev/trycomp.ai?token=pk_AZatYxV5QDSfWpRDaBxzRQ', }, { title: 'Give me a list of failed deployments in my Vercel project', prompt: 'Give me a list of failed deployments in my Vercel project', - icon: , - categories: ['recommended', 'vercel'], + url: 'https://img.logo.dev/vercel.com?token=pk_AZatYxV5QDSfWpRDaBxzRQ', }, { title: 'Check that DDoS protection is enabled for my Cloudflare project', prompt: 'Check that DDoS protection is enabled for my Cloudflare project', - icon: , - categories: ['recommended', 'cloudflare'], + url: 'https://img.logo.dev/cloudflare.com?token=pk_AZatYxV5QDSfWpRDaBxzRQ', }, ]; -export function Chat({ className, orgId, taskId }: Props) { +export function Chat({ className, orgId, taskId, taskName }: Props) { const [input, setInput] = useState(''); const { chat } = useSharedChatContext(); const { messages, sendMessage, status } = useChat({ chat }); @@ -98,7 +89,7 @@ export function Chat({ className, orgId, taskId }: Props) { if (text.trim()) { sendMessage( { text }, - { body: { modelId: Models.OpenAIGPT5Mini, reasoningEffort: 'medium', orgId, taskId } }, + { body: { modelId: 'openai/gpt-5-mini', reasoningEffort: 'medium', orgId, taskId } }, ); setInput(''); } @@ -113,7 +104,7 @@ export function Chat({ className, orgId, taskId 
}: Props) { { text: `I've added the secret "${secretName}". You can now use it in the automation script.`, }, - { body: { modelId: Models.OpenAIGPT5Mini, reasoningEffort: 'medium', orgId, taskId } }, + { body: { modelId: 'openai/gpt-5-mini', reasoningEffort: 'medium', orgId, taskId } }, ); }, [sendMessage, orgId, taskId], @@ -131,7 +122,7 @@ export function Chat({ className, orgId, taskId }: Props) { { text: `I've provided the following information:\n\n${infoText}\n\nYou can now continue with creating the automation script.`, }, - { body: { modelId: Models.OpenAIGPT5Mini, reasoningEffort: 'medium', orgId, taskId } }, + { body: { modelId: 'openai/gpt-5-mini', reasoningEffort: 'medium', orgId, taskId } }, ); }, [sendMessage, orgId, taskId], @@ -157,25 +148,58 @@ export function Chat({ className, orgId, taskId }: Props) { /> -
- - - -
-
- +
+
+ + + + + + Tasks + + + + + + + + + + {taskName || 'Task'} + + + + + + + + + + + Integration Builder + + + + + +
- Chat
{/* Messages Area */} {!hasMessages ? (
{ event.preventDefault(); validateAndSubmitMessage(input); @@ -186,7 +210,7 @@ export function Chat({ className, orgId, taskId }: Props) { {/* Top Section - Fixed Position */}

- What do you want to automate today? + What evidence do you want to collect?

- - {example.icon} - + {example.title}

{example.title} @@ -234,7 +262,7 @@ export function Chat({ className, orgId, taskId }: Props) { ) : (

- + {messages.map((message) => ( ({ chat }); const hasMessages = messages.length > 0; - console.log('hasMessages', hasMessages); - return (
- -
    Chat - Test Scripts Workflow
{/* Mobile layout tabs taking the whole space*/}
- - - - + @@ -49,15 +39,27 @@ export function AutomationPageClient({ orgId, taskId }: Props) {
{/* Desktop layout: Chat on left, Workflow on right OR Chat full-screen */} -
- {scriptUrl || hasMessages ? ( - } - right={} - /> - ) : ( - - )} +
+
+ +
+ + {/* Workflow panel - slides in from right */} +
+ {(scriptUrl || hasMessages) && ( +
+ +
+ )} +
); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/automation/AutomationTester.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/automation/AutomationTester.tsx deleted file mode 100644 index a23bfff4d..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/automation/AutomationTester.tsx +++ /dev/null @@ -1,88 +0,0 @@ -'use client'; - -import { useMemo, useState } from 'react'; -import { Button } from '../../components/ui/button'; -import { Label } from '../../components/ui/label'; -import { ScrollArea } from '../../components/ui/scroll-area'; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '../../components/ui/select'; -import { useTaskAutomationExecution, useTaskAutomationScriptsList } from '../../hooks'; - -interface Props { - className?: string; - orgId: string; - taskId: string; -} - -export function AutomationTester({ className, orgId, taskId }: Props) { - const { scripts, isLoading, refresh } = useTaskAutomationScriptsList({ orgId }); - const [selectedKey, setSelectedKey] = useState(); - - const { execute, isExecuting, result, error } = useTaskAutomationExecution({ - orgId, - taskId, - onSuccess: () => { - // Refresh the scripts list after successful execution - refresh(); - }, - }); - - const handleTest = async () => { - await execute(); - }; - - const displayResult = useMemo(() => { - if (error) { - return JSON.stringify({ error: error.message }, null, 2); - } - if (result) { - return JSON.stringify(result, null, 2); - } - return 'No results yet'; - }, [result, error]); - - return ( -
-
-
-

Automation Script Tester

-
-
- - -
- -
-
-
- -
{displayResult}
-
-
-
-
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/banner.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/banner.tsx deleted file mode 100644 index d29185972..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/banner.tsx +++ /dev/null @@ -1,37 +0,0 @@ -'use client' - -import { XIcon } from 'lucide-react' -import { useState } from 'react' - -interface Props { - defaultOpen: boolean - onDismiss: () => void -} - -export function Banner({ defaultOpen, onDismiss }: Props) { - const [open, setOpen] = useState(defaultOpen) - if (!open) { - return null - } - - return ( -
- Vercel Coding Agent demo This demo showcases a full-stack - coding agent built with Vercel's AI Cloud, AI SDK, and Next.js This - example gives you full flexibility of the underlying model via Vercel AI - Gateway and code execution via Vercel Sandbox. For a drop-in, higher-level - solution for adding vibe coding capabilities to your applications, check - out the v0 Platform API. - -
- ) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/code-writer.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/code-writer.tsx deleted file mode 100644 index b80e3a30f..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/code-writer.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import { FileIcon } from 'lucide-react'; - -export function CodeWriter(props: { filename?: string; className?: string }) { - const lines = [ - 'const run = async () => {', - " const res = await fetch('https://api.example.com');", - ' const data = await res.json()', - ' return { ok: true, data }', - '}', - ]; - - return ( -
- {props.filename && ( -
- - {props.filename} -
- )} -
- {lines.map((l, i) => ( -
-
-
- {l} - {i === lines.length - 1 && ( - - )} -
-
- ))} -
-
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/create-sandbox.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/create-sandbox.tsx deleted file mode 100644 index 26991a2c5..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/create-sandbox.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import type { DataPart } from '@/ai/messages/data-parts' -import { BoxIcon, CheckIcon, XIcon } from 'lucide-react' -import { Spinner } from './spinner' -import { ToolHeader } from '../tool-header' -import { ToolMessage } from '../tool-message' - -interface Props { - message: DataPart['create-sandbox'] -} - -export function CreateSandbox({ message }: Props) { - return ( - - - - Create Sandbox - -
- - {message.status === 'error' ? ( - - ) : ( - - )} - - - {message.status === 'done' && 'Sandbox created successfully'} - {message.status === 'loading' && 'Creating Sandbox'} - {message.status === 'error' && 'Failed to create sandbox'} - -
-
- ) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/generate-files.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/generate-files.tsx deleted file mode 100644 index ce1edb832..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/generate-files.tsx +++ /dev/null @@ -1,72 +0,0 @@ -'use client'; -import type { DataPart } from '@/ai/messages/data-parts'; -import { CheckIcon, CloudUploadIcon, XIcon } from 'lucide-react'; -import { useEffect } from 'react'; -import { ToolHeader } from '../tool-header'; -import { ToolMessage } from '../tool-message'; -import { Spinner } from './spinner'; - -export function GenerateFiles(props: { - className?: string; - message: DataPart['generating-files']; -}) { - const lastInProgress = ['error', 'uploading', 'generating'].includes(props.message.status); - - const generated = lastInProgress - ? props.message.paths.slice(0, props.message.paths.length - 1) - : props.message.paths; - - const generating = lastInProgress - ? (props.message.paths[props.message.paths.length - 1] ?? '') - : null; - - // Broadcast progress to filesystem overlay - useEffect(() => { - try { - const path = typeof generating === 'string' ? generating : generated[generated.length - 1]; - if (props.message.status === 'generating') { - window.dispatchEvent(new CustomEvent('sandbox:files-start', { detail: { path } })); - } else if (props.message.status === 'done' || props.message.status === 'error') { - window.dispatchEvent(new CustomEvent('sandbox:files-finish', { detail: { path } })); - } - } catch (_) { - // noop - } - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [props.message.status, props.message.paths]); - - // Don't show the "Uploaded files" message in chat - users don't need to see file paths - if (props.message.status === 'done') { - return null; - } - - return ( - - - - Generating files - -
- {generated.map((path) => ( -
- - {path} -
- ))} - {typeof generating === 'string' && ( -
- - {props.message.status === 'error' ? ( - - ) : ( - - )} - - {generating} -
- )} - {/* Code writing animation is shown in the filesystem overlay, not in chat */} -
-
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/get-sandbox-url.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/get-sandbox-url.tsx deleted file mode 100644 index 1614660da..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/get-sandbox-url.tsx +++ /dev/null @@ -1,35 +0,0 @@ -import type { DataPart } from '@/ai/messages/data-parts' -import { CheckIcon, LinkIcon } from 'lucide-react' -import { Spinner } from './spinner' -import { ToolHeader } from '../tool-header' -import { ToolMessage } from '../tool-message' - -export function GetSandboxURL({ - message, -}: { - message: DataPart['get-sandbox-url'] -}) { - return ( - - - - Get Sandbox URL - -
- - - - {message.url ? ( - - {message.url} - - ) : ( - Getting Sandbox URL - )} -
-
- ) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/index.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/index.tsx index 08ce4f35d..84ad1c2a8 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/index.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/index.tsx @@ -1,17 +1,13 @@ -import type { DataPart } from '@/ai/messages/data-parts'; -import type { Metadata } from '@/ai/messages/metadata'; -import type { TaskAutomationToolSet } from '@/ai/tools/task-automation-tools'; import type { UIMessage } from 'ai'; import { memo } from 'react'; -import { CreateSandbox } from './create-sandbox'; -import { GenerateFiles } from './generate-files'; -import { GetSandboxURL } from './get-sandbox-url'; +import { DataPart } from '../../../lib/types/data-parts'; +import { Metadata } from '../../../lib/types/metadata'; +import { TaskAutomationToolSet } from '../../../tools/task-automation-tools'; import { PromptInfo } from './prompt-info'; import { PromptSecret } from './prompt-secret'; import { Reasoning } from './reasoning'; import { ReportErrors } from './report-errors'; import { ResearchActivity } from './research-activity'; -import { RunCommand } from './run-command'; import { Text } from './text'; interface Props { @@ -29,15 +25,7 @@ export const MessagePart = memo(function MessagePart({ onSecretAdded, onInfoProvided, }: Props) { - if (part.type === 'data-generating-files') { - return ; - } else if (part.type === 'data-create-sandbox') { - return ; - } else if (part.type === 'data-get-sandbox-url') { - return ; - } else if (part.type === 'data-run-command') { - return ; - } else if (part.type === 'reasoning') { + if (part.type === 'reasoning') { return ; } else if (part.type === 'data-report-errors') { return ; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/report-errors.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/report-errors.tsx index bcca74c52..e73db2a6a 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/report-errors.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/report-errors.tsx @@ -1,14 +1,10 @@ -import type { DataPart } from '@/ai/messages/data-parts' -import { BugIcon } from 'lucide-react' -import { ToolHeader } from '../tool-header' -import { ToolMessage } from '../tool-message' -import Markdown from 'react-markdown' +import { BugIcon } from 'lucide-react'; +import Markdown from 'react-markdown'; +import type { DataPart } from '../../../lib/types/data-parts'; +import { ToolHeader } from '../tool-header'; +import { ToolMessage } from '../tool-message'; -export function ReportErrors({ - message, -}: { - message: DataPart['report-errors'] -}) { +export function ReportErrors({ message }: { message: DataPart['report-errors'] }) { return ( @@ -19,5 +15,5 @@ export function ReportErrors({ {message.summary}
- ) + ); } diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/run-command.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/run-command.tsx deleted file mode 100644 index 0d095d732..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/message-part/run-command.tsx +++ /dev/null @@ -1,38 +0,0 @@ -import type { DataPart } from '@/ai/messages/data-parts' -import { CheckIcon, SquareChevronRightIcon, XIcon } from 'lucide-react' -import { Spinner } from './spinner' -import { ToolHeader } from '../tool-header' -import { ToolMessage } from '../tool-message' -import Markdown from 'react-markdown' - -export function RunCommand({ message }: { message: DataPart['run-command'] }) { - return ( - - - - {message.status === 'executing' && 'Executing'} - {message.status === 'waiting' && 'Waiting'} - {message.status === 'running' && 'Running in background'} - {message.status === 'done' && message.exitCode !== 1 && 'Finished'} - {message.status === 'done' && message.exitCode === 1 && 'Errored'} - {message.status === 'error' && 'Errored'} - -
- - {(message.exitCode && message.exitCode > 0) || - message.status === 'error' ? ( - - ) : ( - - )} - - {`\`${message.command} ${message.args.join( - ' ' - )}\``} -
-
- ) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/types.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/types.tsx index 701ce31f2..4567b0b50 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/types.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/chat/types.tsx @@ -1,6 +1,6 @@ -import type { DataPart } from '@/ai/messages/data-parts'; -import type { Metadata } from '@/ai/messages/metadata'; -import type { TaskAutomationToolSet } from '@/ai/tools/task-automation-tools'; import type { UIMessage } from 'ai'; +import type { DataPart } from '../../lib/types/data-parts'; +import { Metadata } from '../../lib/types/metadata'; +import { TaskAutomationToolSet } from '../../tools/task-automation-tools'; export type ChatUIMessage = UIMessage; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/command-logs.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/command-logs.tsx deleted file mode 100644 index 56ad17ff4..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/command-logs.tsx +++ /dev/null @@ -1,101 +0,0 @@ -import { cn } from '@/lib/utils'; -import { useEffect, useRef } from 'react'; -import z from 'zod/v3'; -import type { Command, CommandLog } from './types'; - -interface Props { - command: Command; - onLog: (data: { sandboxId: string; cmdId: string; log: CommandLog }) => void; - onCompleted: (data: Command) => void; -} - -export function CommandLogs({ command, onLog, onCompleted }: Props) { - const ref = useRef>>(null); - - useEffect(() => { - if (!ref.current) { - const iterator = getCommandLogs(command.sandboxId, command.cmdId); - ref.current = iterator; - (async () => { - for await (const log of iterator) { - onLog({ - sandboxId: command.sandboxId, - cmdId: command.cmdId, - log, - }); - } - - const log = await getCommand(command.sandboxId, command.cmdId); - onCompleted({ - sandboxId: log.sandboxId, - cmdId: log.cmdId, - startedAt: log.startedAt, - exitCode: log.exitCode ?? 0, - command: command.command, - args: command.args, - }); - })(); - } - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - return ( -
{logContent(command)}
- ); -} - -function logContent(command: Command) { - const date = new Date(command.startedAt).toLocaleTimeString('en-US', { - hour12: false, - hour: '2-digit', - minute: '2-digit', - second: '2-digit', - }); - - const line = `${command.command} ${command.args.join(' ')}`; - const body = command.logs?.map((log) => log.data).join('') || ''; - return `[${date}] ${line}\n${body}`; -} - -const logSchema = z.object({ - data: z.string(), - stream: z.enum(['stdout', 'stderr']), - timestamp: z.number(), -}); - -async function* getCommandLogs(sandboxId: string, cmdId: string) { - const response = await fetch(`/api/tasks-automations/sandboxes/${sandboxId}/cmds/${cmdId}/logs`, { - headers: { 'Content-Type': 'application/json' }, - }); - - const reader = response.body!.getReader(); - const decoder = new TextDecoder(); - let line = ''; - while (true) { - const { done, value } = await reader.read(); - if (done) break; - - line += decoder.decode(value, { stream: true }); - const lines = line.split('\n'); - for (let i = 0; i < lines.length - 1; i++) { - if (lines[i]) { - const logEntry = JSON.parse(lines[i]); - yield logSchema.parse(logEntry); - } - } - line = lines[lines.length - 1]; - } -} - -const cmdSchema = z.object({ - sandboxId: z.string(), - cmdId: z.string(), - startedAt: z.number(), - exitCode: z.number().optional(), -}); - -async function getCommand(sandboxId: string, cmdId: string) { - const response = await fetch(`/api/tasks-automations/sandboxes/${sandboxId}/cmds/${cmdId}`); - const json = await response.json(); - return cmdSchema.parse(json); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs-stream.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs-stream.tsx deleted file mode 100644 index 27a6c00c0..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs-stream.tsx +++ /dev/null @@ -1,92 +0,0 @@ -'use client'; - -import { useEffect, useRef } from 'react'; -import stripAnsi from 'strip-ansi'; -import z from 'zod/v3'; -import { useSandboxStore } from '../../state'; - -type StreamingCommandLogs = Record>>; - -export function CommandLogsStream() { - const { sandboxId, commands, addLog, upsertCommand } = useSandboxStore(); - const ref = useRef({}); - - useEffect(() => { - if (sandboxId) { - for (const command of commands.filter((command) => typeof command.exitCode === 'undefined')) { - if (!ref.current[command.cmdId]) { - const iterator = getCommandLogs(sandboxId, command.cmdId); - ref.current[command.cmdId] = iterator; - (async () => { - for await (const log of iterator) { - addLog({ - sandboxId: sandboxId, - cmdId: command.cmdId, - log: log, - }); - } - - const log = await getCommand(sandboxId, command.cmdId); - upsertCommand({ - sandboxId: log.sandboxId, - cmdId: log.cmdId, - exitCode: log.exitCode ?? 
0, - command: command.command, - args: command.args, - }); - })(); - } - } - } - }, [sandboxId, commands, addLog, upsertCommand]); - - return null; -} - -const logSchema = z.object({ - data: z.string(), - stream: z.enum(['stdout', 'stderr']), - timestamp: z.number(), -}); - -async function* getCommandLogs(sandboxId: string, cmdId: string) { - const response = await fetch(`/api/tasks-automations/sandboxes/${sandboxId}/cmds/${cmdId}/logs`, { - headers: { 'Content-Type': 'application/json' }, - }); - - const reader = response.body!.getReader(); - const decoder = new TextDecoder(); - let line = ''; - while (true) { - const { done, value } = await reader.read(); - if (done) break; - - line += decoder.decode(value, { stream: true }); - const lines = line.split('\n'); - for (let i = 0; i < lines.length - 1; i++) { - if (lines[i]) { - const logEntry = JSON.parse(lines[i]); - const parsed = logSchema.parse(logEntry); - yield { - data: stripAnsi(parsed.data), - stream: parsed.stream, - timestamp: parsed.timestamp, - }; - } - } - line = lines[lines.length - 1]; - } -} - -const cmdSchema = z.object({ - sandboxId: z.string(), - cmdId: z.string(), - startedAt: z.number(), - exitCode: z.number().optional(), -}); - -async function getCommand(sandboxId: string, cmdId: string) { - const response = await fetch(`/api/tasks-automations/sandboxes/${sandboxId}/cmds/${cmdId}`); - const json = await response.json(); - return cmdSchema.parse(json); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs.tsx deleted file mode 100644 index 60ba2d911..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/commands-logs.tsx +++ /dev/null @@ -1,52 +0,0 @@ -'use client'; - -import { SquareChevronRight } from 'lucide-react'; -import { useEffect, useRef } from 'react'; -import { Panel, PanelHeader } from '../../components/panels/panels'; -import { ScrollArea } from '../../components/ui/scroll-area'; -import type { Command } from './types'; - -interface Props { - className?: string; - commands: Command[]; -} - -export function CommandsLogs(props: Props) { - const bottomRef = useRef(null); - - useEffect(() => { - bottomRef.current?.scrollIntoView({ behavior: 'smooth' }); - }, [props.commands]); - - return ( - - - - Remote Output - -
- -
- {props.commands.map((command) => { - const date = new Date(command.startedAt).toLocaleTimeString('en-US', { - hour12: false, - hour: '2-digit', - minute: '2-digit', - second: '2-digit', - }); - - const line = `${command.command} ${command.args.join(' ')}`; - const body = command.logs?.map((log) => log.data).join('') || ''; - return ( -
-                  {`[${date}] ${line}\n${body}`}
-                
- ); - })} -
-
- -
- - ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/types.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/types.ts deleted file mode 100644 index 2e7b11cbd..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/commands-logs/types.ts +++ /dev/null @@ -1,16 +0,0 @@ -export interface Command { - background?: boolean - sandboxId: string - cmdId: string - startedAt: number - command: string - args: string[] - exitCode?: number - logs?: CommandLog[] -} - -export interface CommandLog { - data: string - stream: 'stdout' | 'stderr' - timestamp: number -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/error-monitor.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/error-monitor.tsx deleted file mode 100644 index 1ada7dd85..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/error-monitor.tsx +++ /dev/null @@ -1,125 +0,0 @@ -'use client'; - -import { useChat } from '@ai-sdk/react'; -import { createContext, useCallback, useContext, useEffect, useRef, useTransition } from 'react'; -import { useSettings } from '../../components/settings/use-settings'; -import { useSharedChatContext } from '../../lib/chat-context'; -import { useCommandErrorsLogs } from '../../state'; -import { getSummary } from './get-summary'; -import { type Line } from './schemas'; -import { useMonitorState } from './state'; - -interface Props { - children: React.ReactNode; - debounceTimeMs?: number; -} - -export function ErrorMonitor({ children, debounceTimeMs = 10000 }: Props) { - const [pending, startTransition] = useTransition(); - const { cursor, scheduled, setCursor, setScheduled } = useMonitorState(); - const { errors } = useCommandErrorsLogs(); - const { fixErrors } = useSettings(); - const { chat } = useSharedChatContext(); - const { sendMessage, status: chatStatus, messages } = useChat({ chat }); - const submitTimeout = useRef(null); - const inspectedErrors = useRef(0); - const lastReportedErrors = useRef([]); - const errorReportCount = useRef>(new Map()); - const lastErrorReportTime = useRef(0); - const clearSubmitTimeout = useCallback(() => { - if (submitTimeout.current) { - setScheduled(false); - clearTimeout(submitTimeout.current); - submitTimeout.current = null; - } - }, [setScheduled]); - - const status = - chatStatus !== 'ready' || fixErrors === false - ? 'disabled' - : pending || scheduled - ? 
'pending' - : 'ready'; - - const getErrorKey = (error: Line) => { - return `${error.command}-${error.args.join(' ')}-${error.data.slice(0, 100)}`; - }; - - const handleErrors = (errors: Line[], prev: Line[]) => { - const now = Date.now(); - const timeSinceLastReport = now - lastErrorReportTime.current; - - if (timeSinceLastReport < 60000) { - return; - } - - const errorKeys = errors.map(getErrorKey); - const uniqueErrorKeys = [...new Set(errorKeys)]; - - const newErrors = uniqueErrorKeys.filter((key) => { - const count = errorReportCount.current.get(key) || 0; - return count < 1; - }); - - if (newErrors.length === 0) { - return; - } - - startTransition(async () => { - const summary = await getSummary(errors, prev); - if (summary.shouldBeFixed) { - newErrors.forEach((key) => { - errorReportCount.current.set(key, 1); - }); - - lastReportedErrors.current = newErrors; - lastErrorReportTime.current = Date.now(); - - sendMessage({ - role: 'user', - parts: [{ type: 'data-report-errors', data: summary }], - }); - } - }); - }; - - useEffect(() => { - if (messages.length === 0) { - errorReportCount.current.clear(); - lastReportedErrors.current = []; - lastErrorReportTime.current = 0; - } - }, [messages.length]); - - useEffect(() => { - if (status === 'ready' && inspectedErrors.current < errors.length) { - const prev = errors.slice(0, cursor); - const pending = errors.slice(cursor); - inspectedErrors.current = errors.length; - setScheduled(true); - clearSubmitTimeout(); - submitTimeout.current = setTimeout(() => { - setScheduled(false); - setCursor(errors.length); - handleErrors(pending, prev); - }, debounceTimeMs); - } else if (status === 'disabled') { - clearSubmitTimeout(); - } - // eslint-disable-next-line react-hooks/exhaustive-deps -- This is fine - }, [clearSubmitTimeout, cursor, errors, status]); - - return {children}; -} - -const Context = createContext<{ - status: 'ready' | 'pending' | 'disabled'; -} | null>(null); - -export function useErrorMonitor() { - const context = useContext(Context); - if (!context) { - throw new Error('useErrorMonitor must be used within a ErrorMonitor'); - } - return context; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/get-summary.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/get-summary.ts deleted file mode 100644 index 8765f1205..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/get-summary.ts +++ /dev/null @@ -1,15 +0,0 @@ -import { resultSchema, type Line, type Lines } from './schemas' - -export async function getSummary(lines: Line[], previous: Line[]) { - const response = await fetch('/api/errors', { - body: JSON.stringify({ lines, previous } satisfies Lines), - method: 'POST', - }) - - if (!response.ok) { - throw new Error(`Failed to fetch errors summary: ${response.statusText}`) - } - - const body = await response.json() - return resultSchema.parse(body) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/schemas.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/schemas.ts deleted file mode 100644 index 43f46881c..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/schemas.ts +++ /dev/null @@ -1,33 +0,0 @@ -import z from 'zod' - -export const lineSchema = z.object({ - command: z.string().describe('The command that generated the log'), - args: z.array(z.string()).describe('Arguments passed 
to the command'), - stream: z.enum(['stdout', 'stderr']).describe('Stream type of the log'), - data: z.string().describe('The log content'), - timestamp: z.number().describe('The timestamp of the log entry'), -}) - -export const linesSchema = z.object({ - lines: z.array(lineSchema), - previous: z.array(lineSchema), -}) - -export const resultSchema = z.object({ - shouldBeFixed: z - .boolean() - .describe( - 'Whether the logs contain actionable errors that require code fixes (not just warnings or info messages)' - ), - summary: z - .string() - .describe( - 'A summary of actionable errors found in the logs, including error types, affected files, and specific failure reasons. Empty if no actionable errors found. It can be Markdown for better readability.' - ), - paths: z.array( - z.string().describe('List of file paths that contain actionable errors.') - ), -}) - -export type Line = z.infer -export type Lines = z.infer diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/state.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/state.ts deleted file mode 100644 index 5819994bd..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/state.ts +++ /dev/null @@ -1,15 +0,0 @@ -import { create } from 'zustand' - -interface State { - cursor: number - scheduled: boolean - setCursor: (cursor: number) => void - setScheduled: (scheduled: boolean) => void -} - -export const useMonitorState = create((set) => ({ - cursor: 0, - scheduled: false, - setCursor: (cursor) => set({ cursor }), - setScheduled: (scheduled) => set({ scheduled }), -})) diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/build-file-tree.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/build-file-tree.tsx deleted file mode 100644 index cddbca038..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/build-file-tree.tsx +++ /dev/null @@ -1,71 +0,0 @@ -export interface FileNode { - children?: FileNode[] - content?: string - expanded?: boolean - name: string - path: string - type: 'file' | 'folder' -} - -interface FileNodeBuilder { - children?: { [key: string]: FileNodeBuilder } - content?: string - expanded?: boolean - name: string - path: string - type: 'file' | 'folder' -} - -export function buildFileTree(paths: string[]): FileNode[] { - if (paths.length === 0) return [] - const root: { [key: string]: FileNodeBuilder } = {} - - for (const path of paths) { - const parts = path.split('/').filter(Boolean) - let current = root - let currentPath = '' - - for (let index = 0; index < parts.length; index++) { - const part = parts[index] - currentPath += '/' + part - const isFile = index === parts.length - 1 - - if (!current[part]) { - current[part] = { - name: part, - type: isFile ? 'file' : 'folder', - path: currentPath, - content: isFile - ? `// Content for ${currentPath}\n// This will be loaded when the file is selected` - : undefined, - children: isFile ? undefined : {}, - expanded: false, - } - } - - if (!isFile) { - current = current[part].children! - } - } - } - - const convertToArray = (obj: { - [key: string]: FileNodeBuilder - }): FileNode[] => { - return Object.values(obj) - .map( - (node): FileNode => ({ - ...node, - children: node.children ? convertToArray(node.children) : undefined, - }) - ) - .sort((a, b) => { - if (a.type !== b.type) { - return a.type === 'folder' ? 
-1 : 1 - } - return a.name.localeCompare(b.name) - }) - } - - return convertToArray(root) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/code-writing-overlay.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/code-writing-overlay.tsx deleted file mode 100644 index fa342cf26..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/code-writing-overlay.tsx +++ /dev/null @@ -1,161 +0,0 @@ -import { memo, useEffect, useMemo, useRef, useState } from 'react'; - -// A premium, minimal, code-writing animation designed for the filesystem panel -// Dark-mode first, subtle gradients, elegant motion -export const CodeWritingOverlay = memo(function CodeWritingOverlay(props: { - filename?: string; - className?: string; -}) { - // Smooth animation ticker - const [tick, setTick] = useState(0); - const [showDetails, setShowDetails] = useState(false); - const rafRef = useRef(null); - useEffect(() => { - let last = performance.now(); - const loop = (now: number) => { - const dt = now - last; - if (dt > 60) { - setTick((t) => (t + 1) % 4096); - last = now; - } - rafRef.current = requestAnimationFrame(loop); - }; - rafRef.current = requestAnimationFrame(loop); - return () => { - if (rafRef.current) cancelAnimationFrame(rafRef.current); - }; - }, []); - - const lines = useMemo( - () => [ - 'module.exports = async (event) => {', - ' const orgId = event?.orgId', - " const u = new URL('https://api.internal/resource');", - ' const res = await fetch(u);', - ' const data = await res.json()', - ' return { ok: true, data }', - '}', - ], - [], - ); - - // Premium typing simulation across the full sequence (stable whitespace) - const sequence = useMemo(() => lines.join('\n'), [lines]); - const total = sequence.length; - const typed = Math.max(0, Math.floor(((tick % 2600) / 2600) * (total + 10)) - 5); - const typedClamped = Math.min(total, typed); - const typedText = useMemo(() => sequence.slice(0, typedClamped), [sequence, typedClamped]); - - // Sparkles near the caret for a premium feel (DOM-based position) - const caretRef = useRef(null); - const [sparks, setSparks] = useState<{ id: number; x: number; y: number; life: number }[]>([]); - const nextId = useRef(0); - useEffect(() => { - if (tick % 6 === 0 && caretRef.current) { - const rect = caretRef.current.getBoundingClientRect(); - const host = caretRef.current.offsetParent as HTMLElement | null; - let x = rect.left; - let y = rect.top; - if (host) { - const hostRect = host.getBoundingClientRect(); - x = rect.left - hostRect.left; - y = rect.top - hostRect.top; - } - setSparks((prev) => { - const id = nextId.current++; - return [...prev.filter((s) => s.life > 0.15), { id, x, y, life: 1 }]; - }); - } - // decay - setSparks((prev) => prev.map((s) => ({ ...s, life: s.life - 0.06 }))); - }, [tick]); - - return ( -
- {/* Aurora gradient wash */} -
-
-
-
- - {/* Shimmering grid */} -
- - {/* Card */} -
- {/* Header */} -
-
- - - - - - {props.filename || 'creating file…'} -
- -
- - {/* Body (collapsible like v0) */} -
- {!showDetails && ( -
-
-
-
-
-
-
-
-
-
- )} - - {showDetails && ( -
-
-                {typedText}
-                
-              
- {sparks.map((s) => ( - - ))} -
- )} - - {/* Progress bar */} -
-
-
-
-
-
- ); -}); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-content.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-content.tsx deleted file mode 100644 index f6e0962a8..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-content.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { memo } from 'react'; -import { PulseLoader } from 'react-spinners'; -import useSWR from 'swr'; -import { SyntaxHighlighter } from './syntax-highlighter'; - -interface Props { - sandboxId: string; - path: string; -} - -export const FileContent = memo(function FileContent({ sandboxId, path }: Props) { - const searchParams = new URLSearchParams({ path }); - const content = useSWR( - `/api/tasks-automations/sandboxes/${sandboxId}/files?${searchParams.toString()}`, - async (pathname: string, init: RequestInit) => { - const response = await fetch(pathname, init); - const text = await response.text(); - return text; - }, - { refreshInterval: 1000 }, - ); - - if (content.isLoading || !content.data) { - return ( -
-
- -
-
- ); - } - - return ; -}); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-explorer.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-explorer.tsx deleted file mode 100644 index f4810889f..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/file-explorer.tsx +++ /dev/null @@ -1,329 +0,0 @@ -'use client'; - -import { cn } from '@/lib/utils'; -import { ChevronDownIcon, ChevronRightIcon, FileIcon, FolderIcon } from 'lucide-react'; -import { memo, useCallback, useEffect, useMemo, useState } from 'react'; -import { PulseLoader } from 'react-spinners'; -import { toast } from 'sonner'; -import { FileContent } from '../../components/file-explorer/file-content'; -import { Panel, PanelHeader } from '../../components/panels/panels'; -import { Button } from '../../components/ui/button'; -import { ScrollArea, ScrollBar } from '../../components/ui/scroll-area'; -import { useSandboxStore } from '../../state'; -import { buildFileTree, type FileNode } from './build-file-tree'; -import { CodeWritingOverlay } from './code-writing-overlay'; - -interface Props { - className: string; - disabled?: boolean; - paths: string[]; - sandboxId?: string; - initialSelectedPath?: string; - orgId: string; - taskId: string; -} - -export const FileExplorer = memo(function FileExplorer({ - className, - disabled, - paths, - sandboxId, - initialSelectedPath, - orgId, - taskId, -}: Props) { - const fileTree = useMemo(() => buildFileTree(paths), [paths]); - const [selected, setSelected] = useState(null); - const [fs, setFs] = useState(fileTree); - const isLoading = !disabled && (!paths || paths.length === 0); - const [isWriting, setIsWriting] = useState(false); - const [writingTarget, setWritingTarget] = useState(null); - const [augmented, setAugmented] = useState(false); - - useEffect(() => { - setFs(fileTree); - setAugmented(false); - }, [fileTree]); - - // Auto-select a file when an initial path is provided and present in the tree - useEffect(() => { - if (!initialSelectedPath || selected) return; - const findNode = (nodes: FileNode[]): FileNode | null => { - for (const node of nodes) { - if (node.path === initialSelectedPath) return node; - if (node.children) { - const found = findNode(node.children); - if (found) return found; - } - } - return null; - }; - const node = findNode(fileTree); - if (node) setSelected(node); - }, [initialSelectedPath, fileTree, selected]); - - const toggleFolder = useCallback((path: string) => { - setFs((prev) => { - const updateNode = (nodes: FileNode[]): FileNode[] => - nodes.map((node) => { - if (node.path === path && node.type === 'folder') { - return { ...node, expanded: !node.expanded }; - } else if (node.children) { - return { ...node, children: updateNode(node.children) }; - } else { - return node; - } - }); - return updateNode(prev); - }); - }, []); - - const selectFile = useCallback((node: FileNode) => { - if (node.type === 'file') { - setSelected(node); - } - }, []); - - const renderFileTree = useCallback( - (nodes: FileNode[], depth = 0) => { - return nodes.map((node) => ( - - )); - }, - [selected, toggleFolder, selectFile], - ); - - const [isTesting, setIsTesting] = useState(false); - const { upsertCommand } = useSandboxStore(); - - // Determine if the selected file is a Lambda script - const isLambdaScript = selected?.name.endsWith('.js'); - - // Listen for global write events from chat tool - useEffect(() => { - let depth = 0; - const start = 
(e: Event) => { - const detail = (e as CustomEvent).detail as { path?: string }; - depth += 1; - setIsWriting(true); - setWritingTarget(detail?.path || null); - }; - const finish = () => { - depth = Math.max(0, depth - 1); - if (depth === 0) { - setTimeout(() => { - setIsWriting(false); - setWritingTarget(null); - }, 300); - } - }; - window.addEventListener('sandbox:files-start', start as EventListener); - window.addEventListener('sandbox:files-finish', finish as EventListener); - return () => { - window.removeEventListener('sandbox:files-start', start as EventListener); - window.removeEventListener('sandbox:files-finish', finish as EventListener); - }; - }, []); - - return ( - - - - Remote Filesystem - {selected && !disabled && ( - {selected.path} - )} - {selected && !disabled && sandboxId && isLambdaScript && ( -
- -
- )} -
- -
- {isWriting && ( -
- -
- )} - {isLoading ? ( -
-
- - Loading filesystem… -
-
- ) : ( -
- -
{renderFileTree(fs)}
-
- {selected && sandboxId && !disabled && ( - - - - - )} -
- )} -
-
- ); -}); - -// Memoized file tree node component -const FileTreeNode = memo(function FileTreeNode({ - node, - depth, - selected, - onToggleFolder, - onSelectFile, - renderFileTree, -}: { - node: FileNode; - depth: number; - selected: FileNode | null; - onToggleFolder: (path: string) => void; - onSelectFile: (node: FileNode) => void; - renderFileTree: (nodes: FileNode[], depth: number) => React.ReactNode; -}) { - const isFolder = node.type === 'folder'; - const fileColor = (name: string) => { - const lower = name.toLowerCase(); - if (/(\.ts|\.tsx|\.js|\.jsx)$/.test(lower)) return 'text-emerald-600 dark:text-emerald-400'; - if (/(\.json|\.yml|\.yaml)$/.test(lower)) return 'text-amber-600 dark:text-amber-400'; - if (/(\.md|\.mdx)$/.test(lower)) return 'text-violet-600 dark:text-violet-400'; - if (/(\.css|\.scss|\.sass)$/.test(lower)) return 'text-pink-600 dark:text-pink-400'; - return 'text-muted-foreground'; - }; - const colorClass = isFolder ? 'text-sky-600 dark:text-sky-400' : fileColor(node.name); - - const handleClick = useCallback(() => { - if (node.type === 'folder') { - onToggleFolder(node.path); - } else { - onSelectFile(node); - } - }, [node, onToggleFolder, onSelectFile]); - - return ( -
-
- {node.type === 'folder' ? ( - <> - {node.expanded ? ( - - ) : ( - - )} - - - ) : ( - <> -
- - - )} - {node.name} -
- - {node.type === 'folder' && node.expanded && node.children && ( -
{renderFileTree(node.children, depth + 1)}
- )} -
- ); -}); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/syntax-highlighter.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/syntax-highlighter.tsx deleted file mode 100644 index 82c07435f..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/file-explorer/syntax-highlighter.tsx +++ /dev/null @@ -1,114 +0,0 @@ -import { useEffect, useState } from 'react'; -import Prism from 'react-syntax-highlighter'; -import atomOneDark from 'react-syntax-highlighter/dist/esm/styles/hljs/atom-one-dark'; -import atomOneLight from 'react-syntax-highlighter/dist/esm/styles/hljs/atom-one-light'; - -export function SyntaxHighlighter(props: { path: string; code: string }) { - const lang = detectLanguageFromFilename(props.path); - const [isDark, setIsDark] = useState(false); - - useEffect(() => { - if (typeof window === 'undefined') return; - const isHtmlDark = document.documentElement.classList.contains('dark'); - const mq = window.matchMedia('(prefers-color-scheme: dark)'); - const update = () => setIsDark(isHtmlDark || mq.matches); - update(); - const handler = (e: MediaQueryListEvent) => setIsDark(isHtmlDark || e.matches); - mq.addEventListener?.('change', handler); - return () => mq.removeEventListener?.('change', handler); - }, []); - return ( - - {props.code} - - ); -} - -function detectLanguageFromFilename(path: string): string { - const pathParts = path.split('/'); - const extension = pathParts[pathParts.length - 1]?.split('.').pop()?.toLowerCase(); - - const extensionMap: Record = { - // JavaScript/TypeScript - js: 'jsx', - jsx: 'jsx', - ts: 'typescript', - tsx: 'tsx', - mjs: 'javascript', - cjs: 'javascript', - - // Python - py: 'python', - pyw: 'python', - pyi: 'python', - - // Web technologies - html: 'html', - htm: 'html', - css: 'css', - scss: 'scss', - sass: 'sass', - less: 'less', - - // Other popular languages - java: 'java', - c: 'c', - cpp: 'cpp', - cxx: 'cpp', - cc: 'cpp', - h: 'c', - hpp: 'cpp', - cs: 'csharp', - php: 'php', - rb: 'ruby', - go: 'go', - rs: 'rust', - swift: 'swift', - kt: 'kotlin', - scala: 'scala', - sh: 'bash', - bash: 'bash', - zsh: 'bash', - fish: 'bash', - ps1: 'powershell', - - // Data formats - json: 'json', - xml: 'xml', - yaml: 'yaml', - yml: 'yaml', - toml: 'toml', - ini: 'ini', - - // Markup - md: 'markdown', - markdown: 'markdown', - tex: 'latex', - - // Database - sql: 'sql', - - // Config files - dockerfile: 'dockerfile', - gitignore: 'bash', - env: 'bash', - }; - - return extensionMap[extension || ''] || 'text'; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/sandbox-state.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/sandbox-state.tsx deleted file mode 100644 index fe34f107d..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/sandbox-state.tsx +++ /dev/null @@ -1,36 +0,0 @@ -'use client'; - -import { useEffect } from 'react'; -import useSWR from 'swr'; -import { useSandboxStore } from '../../state'; - -export function SandboxState() { - const { sandboxId, status, setStatus } = useSandboxStore(); - // Remove modal for max duration; keep silent state management. - - return sandboxId ? 
: null; -} - -interface DirtyCheckerProps { - sandboxId: string; - setStatus: (status: 'running' | 'stopped') => void; -} - -function DirtyChecker({ sandboxId, setStatus }: DirtyCheckerProps) { - const content = useSWR<'ok' | 'stopped'>( - `/api/tasks-automations/sandboxes/${sandboxId}`, - async (pathname: string, init: RequestInit) => { - const response = await fetch(pathname, init); - const { status } = await response.json(); - return status; - }, - { refreshInterval: 1000 }, - ); - - useEffect(() => { - // Treat any 'stopped' status as transient; immediately set to 'running' - if (content.data === 'stopped') setStatus('running'); - }, [setStatus, content.data]); - - return null; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/welcome.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/welcome.tsx deleted file mode 100644 index 8bd59eecf..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/modals/welcome.tsx +++ /dev/null @@ -1,97 +0,0 @@ -'use client'; - -import { InfoIcon } from 'lucide-react'; -import type { ReactNode } from 'react'; -import { useEffect } from 'react'; -import { create } from 'zustand'; -import { Button } from '../../components/ui/button'; - -interface State { - open: boolean | undefined; - setOpen: (open: boolean) => void; -} - -export const useWelcomeStore = create((set) => ({ - open: undefined, - setOpen: (open) => set({ open }), -})); - -export function Welcome(props: { onDismissAction(): void; defaultOpen: boolean }) { - const { open, setOpen } = useWelcomeStore(); - - useEffect(() => { - setOpen(props.defaultOpen); - }, [setOpen, props.defaultOpen]); - - if (!(typeof open === 'undefined' ? props.defaultOpen : open)) { - return null; - } - - const handleDismiss = () => { - props.onDismissAction(); - setOpen(false); - }; - - return ( -
-
-
-
event.stopPropagation()} - > -
-

- OSS Vibe Coding Platform -

-

- This is a demo of an end-to-end coding platform where the user can - enter text prompts, and the agent will create a full stack application. -

-

- It uses Vercel"s AI Cloud services like{' '} - Sandbox for - secure code execution,{' '} - AI Gateway for - GPT-5 and other models support,{' '} - Fluid Compute for - efficient rendering and streaming, and it"s built with{' '} - Next.js and the{' '} - AI SDK. -

-
-
- -
-
-
-
- ); -} - -export function ToggleWelcome() { - const { open, setOpen } = useWelcomeStore(); - return ( - - ); -} - -function ExternalLink({ children, href }: { children: ReactNode; href: string }) { - return ( - - {children} - - ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/model-selector/use-available-models.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/model-selector/use-available-models.tsx index d29327eb5..6215a3275 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/model-selector/use-available-models.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/model-selector/use-available-models.tsx @@ -21,8 +21,10 @@ export function useAvailableModels() { setError(null); } + const url = `${process.env.NEXT_PUBLIC_ENTERPRISE_API_URL}/api/tasks-automations/models`; + try { - const response = await fetch('/api/tasks-automations/models'); + const response = await fetch(url); if (!response.ok) { throw new Error('Failed to fetch models'); } diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/auto-fix-errors.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/auto-fix-errors.tsx deleted file mode 100644 index f2f633f8a..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/auto-fix-errors.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import { useFixErrors } from '../../components/settings/use-settings'; -import { Checkbox } from '../../components/ui/checkbox'; -import { Label } from '../../components/ui/label'; - -export function AutoFixErrors() { - const [fixErrors, setFixErrors] = useFixErrors(); - return ( -
setFixErrors(!fixErrors)} - > -
- -

- Automatically detects and fixes errors in generated code. -

-
- setFixErrors(checked === 'indeterminate' ? false : checked)} - /> -
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/model-selector.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/model-selector.tsx deleted file mode 100644 index babed9afb..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/model-selector.tsx +++ /dev/null @@ -1,58 +0,0 @@ -'use client'; - -import { cn } from '@/lib/utils'; -import { Loader2Icon } from 'lucide-react'; -import { useMemo } from 'react'; -import { - Select, - SelectContent, - SelectGroup, - SelectItem, - SelectLabel, - SelectTrigger, - SelectValue, -} from '../../components/ui/select'; -import { useAvailableModels } from './use-available-models'; -import { useModelId } from './use-settings'; - -export function ModelSelector({ className }: { className?: string }) { - const [modelId, setModelId] = useModelId(); - const { models: available, isLoading, error } = useAvailableModels(); - const models = useMemo( - () => available?.sort((a, b) => a.label.localeCompare(b.label)) || [], - [available], - ); - - return ( - - ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/reasoning-effort.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/reasoning-effort.tsx deleted file mode 100644 index 673e781f0..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/reasoning-effort.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { Models } from '@/ai/constants'; -import { Checkbox } from '../../components/ui/checkbox'; -import { Label } from '../../components/ui/label'; -import { useModelId, useReasoningEffort } from './use-settings'; - -export function ReasoningEffort() { - const [modelId] = useModelId(); - const [effort, setEffort] = useReasoningEffort(); - if (modelId !== Models.OpenAIGPT5 && modelId !== Models.OpenAIGPT5Mini) { - return null; - } - - return ( -
setEffort(effort === 'medium' ? 'low' : 'medium')} - > -
- -

- With GPT-5, you can request a higher reasoning effort level.

-
- setEffort(checked === true ? 'medium' : 'low')} - /> -
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/settings.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/settings.tsx deleted file mode 100644 index fad6b63e8..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/settings.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { SlidersVerticalIcon } from 'lucide-react'; -import { Button } from '../../components/ui/button'; -import { Popover, PopoverContent, PopoverTrigger } from '../../components/ui/popover'; -import { AutoFixErrors } from './auto-fix-errors'; -import { ReasoningEffort } from './reasoning-effort'; - -export function Settings() { - return ( - - - - - -
- - -
-
-
- ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-available-models.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-available-models.tsx deleted file mode 100644 index d29327eb5..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-available-models.tsx +++ /dev/null @@ -1,65 +0,0 @@ -import { useCallback, useEffect, useState } from 'react'; - -interface DisplayModel { - id: string; - label: string; -} - -const MAX_RETRIES = 3; -const RETRY_DELAY_MILLIS = 5000; - -export function useAvailableModels() { - const [models, setModels] = useState([]); - const [isLoading, setIsLoading] = useState(true); - const [error, setError] = useState(null); - const [retryCount, setRetryCount] = useState(0); - - const fetchModels = useCallback( - async (isRetry: boolean = false) => { - if (!isRetry) { - setIsLoading(true); - setError(null); - } - - try { - const response = await fetch('/api/tasks-automations/models'); - if (!response.ok) { - throw new Error('Failed to fetch models'); - } - const data = await response.json(); - const newModels = data.models.map((model: { id: string; name: string }) => ({ - id: model.id, - label: model.name, - })); - setModels(newModels); - setError(null); - setRetryCount(0); - setIsLoading(false); - } catch (err) { - setError(err instanceof Error ? err : new Error('Failed to fetch models')); - if (retryCount < MAX_RETRIES) { - setRetryCount((prev) => prev + 1); - setIsLoading(true); - } else { - setIsLoading(false); - } - } finally { - setIsLoading(false); - } - }, - [retryCount], - ); - - useEffect(() => { - if (retryCount === 0) { - fetchModels(false); - } else if (retryCount > 0 && retryCount <= MAX_RETRIES) { - const timerId = setTimeout(() => { - fetchModels(true); - }, RETRY_DELAY_MILLIS); - return () => clearTimeout(timerId); - } - }, [retryCount, fetchModels]); - - return { models, isLoading, error }; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-settings.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-settings.ts deleted file mode 100644 index 9e8d07c59..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/settings/use-settings.ts +++ /dev/null @@ -1,29 +0,0 @@ -import { parseAsBoolean, parseAsStringLiteral, useQueryState } from 'nuqs' -import { DEFAULT_MODEL, SUPPORTED_MODELS } from '@/ai/constants' - -export function useSettings() { - const [modelId] = useModelId() - const [fixErrors] = useFixErrors() - const [reasoningEffort] = useReasoningEffort() - return { modelId, fixErrors, reasoningEffort } -} - -export function useModelId() { - return useQueryState( - 'modelId', - parseAsStringLiteral(SUPPORTED_MODELS.map((model) => model)).withDefault( - DEFAULT_MODEL - ) - ) -} - -export function useReasoningEffort() { - return useQueryState( - 'effort', - parseAsStringLiteral(['medium', 'low']).withDefault('low') - ) -} - -export function useFixErrors() { - return useQueryState('fix-errors', parseAsBoolean.withDefault(true)) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/EmptyState.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/EmptyState.tsx index 9e1413a0b..0c3c84b5f 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/EmptyState.tsx +++ 
b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/EmptyState.tsx @@ -1,6 +1,6 @@ 'use client'; -import { Code2, Zap } from 'lucide-react'; +import { Code, Code2 } from 'lucide-react'; interface Props { type: 'automation' | 'workflow'; @@ -10,14 +10,14 @@ export function EmptyState({ type }: Props) { if (type === 'automation') { return (
-
-
- +
+ +
+

No Integration Yet

+

+ Chat with the Comp AI agent to build your integration. +

-

No Automation Yet

-

- Chat with the AI assistant to build your automation workflow -

); @@ -29,7 +29,7 @@ export function EmptyState({ type }: Props) {
-

No workflow steps found

+

No integration steps found

); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/UnifiedWorkflowCard.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/UnifiedWorkflowCard.tsx new file mode 100644 index 000000000..1744cc5c8 --- /dev/null +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/UnifiedWorkflowCard.tsx @@ -0,0 +1,154 @@ +'use client'; + +import { Card, CardContent, CardHeader, CardTitle } from '@comp/ui/card'; +import { Loader2, Play, Zap } from 'lucide-react'; +import Image from 'next/image'; +import { useEffect, useState } from 'react'; +import { TaskAutomationWorkflow } from '../../../lib'; + +interface WorkflowStep { + id: string; + title: string; + description: string; + type: 'trigger' | 'action' | 'condition' | 'output'; + iconType: + | 'start' + | 'fetch' + | 'login' + | 'check' + | 'process' + | 'filter' + | 'notify' + | 'complete' + | 'error'; +} + +interface Props { + steps: WorkflowStep[]; + title: string; + onTest?: () => void; + integrationsUsed: TaskAutomationWorkflow['integrationsUsed']; +} + +export function UnifiedWorkflowCard({ steps, title, onTest, integrationsUsed }: Props) { + const [isAnimationComplete, setIsAnimationComplete] = useState(false); + + useEffect(() => { + // Calculate total animation time: card (1s) + expansion (1s) + all steps + const totalAnimationTime = 2500 + steps.length * 1200; + + const timer = setTimeout(() => { + setIsAnimationComplete(true); + }, totalAnimationTime); + + return () => clearTimeout(timer); + }, [steps.length]); + + return ( + + {/* Header with integration icons */} + +
+ {integrationsUsed.map((integration) => ( +
+ {integration.link} +
+ ))} +
+ + {title} +
+ + {/* Steps Section - Pure CSS Animation */} + + + +
+ {steps.map((step, index) => ( +
+ {/* Icon column with connection */} +
+
+ +
+ {/* Connection line - only show if not last step */} + {index < steps.length - 1 && ( +
+ )} +
+ + {/* Content column */} +
+ + {step.title} + + {step.description && ( +

+ {step.description} +

+ )} +
+
+ ))} +
+ + + +
+ {!isAnimationComplete ? ( +
+
+ +
+ Building integration +
+ ) : ( + + )} +
+ + ); +} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/index.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/index.ts index 687409336..95d576f1e 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/index.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/components/index.ts @@ -2,6 +2,7 @@ export { CodeViewer } from './CodeViewer'; export { EmptyState } from './EmptyState'; export { TestDialog } from './TestDialog'; export { TestResultsPanel } from './TestResultsPanel'; +export { UnifiedWorkflowCard } from './UnifiedWorkflowCard'; export { ViewModeSwitch } from './ViewModeSwitch'; export { WorkflowSkeleton } from './WorkflowSkeleton'; export { WorkflowStepCard } from './WorkflowStepCard'; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/workflow-visualizer-simple.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/workflow-visualizer-simple.tsx index 440362e3e..6b4954e07 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/workflow-visualizer-simple.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/components/workflow/workflow-visualizer-simple.tsx @@ -1,10 +1,9 @@ 'use client'; -import { Models } from '@/ai/constants'; import { cn } from '@/lib/utils'; import { useChat } from '@ai-sdk/react'; import { Button } from '@comp/ui/button'; -import { Play, Zap } from 'lucide-react'; +import { Code, Play, Zap } from 'lucide-react'; import { useParams } from 'next/navigation'; import { useEffect, useMemo } from 'react'; import { @@ -20,9 +19,9 @@ import { CodeViewer, EmptyState, TestResultsPanel, + UnifiedWorkflowCard, ViewModeSwitch, WorkflowSkeleton, - WorkflowStepCard, } from './components'; import type { TestResult } from './types'; @@ -41,8 +40,8 @@ export function WorkflowVisualizerSimple({ className }: Props) { isLoading: isLoadingScript, refresh, } = useTaskAutomationScript({ - orgId: orgId || '', - taskId: taskId || '', + orgId: orgId, + taskId: taskId, enabled: !!orgId && !!taskId, }); @@ -70,9 +69,9 @@ export function WorkflowVisualizerSimple({ className }: Props) { result: executionResult, error: executionError, reset: resetExecution, - } = useTaskAutomationExecution({ orgId: orgId || '', taskId: taskId || '' }); + } = useTaskAutomationExecution({ orgId: orgId, taskId: taskId }); - const { steps, isAnalyzing } = useTaskAutomationWorkflow({ + const { steps, isAnalyzing, integrationsUsed, title } = useTaskAutomationWorkflow({ scriptContent: script?.content, enabled: !!script?.content, }); @@ -135,7 +134,7 @@ Please fix the automation script to resolve this error.`; // Send the error to the chat sendMessage( { text: errorMessage }, - { body: { modelId: Models.OpenAIGPT5Mini, reasoningEffort: 'medium', orgId, taskId } }, + { body: { modelId: 'openai/gpt-5-mini', reasoningEffort: 'medium', orgId, taskId } }, ); // Close the dialog @@ -147,13 +146,13 @@ Please fix the automation script to resolve this error.`; if (showEmptyState) { return ( - +

- Your Automation + Integration Builder

@@ -165,20 +164,33 @@ Please fix the automation script to resolve this error.`; } return ( - - + +

- - {isAnalyzing ? 'Analyzing Automation' : 'Your Automation'} + + Integration Builder

- +
+ + {script && !showEmptyState && ( + + )} +
-
+
{/* Show Test Results Panel INSTEAD of regular content when testing/results available */} {isExecuting || testResult ? ( ) : ( /* Regular Content - Only show when NOT testing */ -
+
{viewMode === 'visual' ? ( // Visual Mode showLoading ? ( ) : steps.length > 0 ? ( -
- {steps.map((step, index) => ( - 0} - /> - ))} -
+ ) : ( ) @@ -219,23 +232,6 @@ Please fix the automation script to resolve this error.`;
)}
- - {/* Fixed Test Button */} - {script && !showEmptyState && ( -
-
- -
-
- )} ); } diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/file-explorer.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/file-explorer.tsx deleted file mode 100644 index 6c893becd..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/file-explorer.tsx +++ /dev/null @@ -1,26 +0,0 @@ -'use client'; - -import { FileExplorer as FileExplorerComponent } from './components/file-explorer/file-explorer'; -import { useSandboxStore } from './state'; - -interface Props { - className: string; - initialSelectedPath?: string; - orgId: string; - taskId: string; -} - -export function FileExplorer({ className, initialSelectedPath, orgId, taskId }: Props) { - const { sandboxId, status, paths } = useSandboxStore(); - return ( - - ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/index.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/index.ts index 8445203eb..6ee556d8f 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/index.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/index.ts @@ -1,12 +1,3 @@ -/** - * Task Automation Hooks - * - * Centralized export for all task automation hooks. - * These hooks provide the core functionality for managing automation scripts, - * execution, and workflow visualization. - */ - export { useTaskAutomationExecution } from './use-task-automation-execution'; export { useTaskAutomationScript } from './use-task-automation-script'; -export { useTaskAutomationScriptsList } from './use-task-automation-scripts-list'; export { useTaskAutomationWorkflow } from './use-task-automation-workflow'; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-execution.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-execution.ts index 978640fd4..abb5acabc 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-execution.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-execution.ts @@ -43,7 +43,8 @@ export function useTaskAutomationExecution({ const pollRunStatus = async () => { try { - const response = await fetch(`/api/tasks-automations/runs/${runId}`); + const url = `${process.env.NEXT_PUBLIC_ENTERPRISE_API_URL}/api/tasks-automations/runs/${runId}`; + const response = await fetch(url); const data = await response.json(); if (!response.ok) { @@ -51,11 +52,6 @@ export function useTaskAutomationExecution({ } if (data.status === 'COMPLETED' && data.output) { - // Debug logging - console.log('[Automation Execution] Raw API response:', data); - console.log('[Automation Execution] data.output:', data.output); - console.log('[Automation Execution] data.output.output:', data.output.output); - const executionResult: TaskAutomationExecutionResult = { success: data.output.success, data: data.output.output, diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-script.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-script.ts index 557787e87..e75f6bc3b 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-script.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-script.ts @@ -13,10 +13,9 @@ * ``` */ -import { useCallback } from 'react'; import useSWR from 'swr'; +import { TaskAutomationScript, UseTaskAutomationScriptOptions } from 
'../lib'; import { taskAutomationApi } from '../lib/task-automation-api'; -import type { TaskAutomationScript, UseTaskAutomationScriptOptions } from '../lib/types'; export function useTaskAutomationScript({ orgId, @@ -27,7 +26,7 @@ export function useTaskAutomationScript({ const { data, error, isLoading, mutate } = useSWR( enabled ? ['task-automation-script', scriptKey] : null, - () => taskAutomationApi.s3.getScript(scriptKey), + () => taskAutomationApi.s3.getScript(scriptKey) as Promise, { revalidateOnFocus: false, revalidateOnReconnect: false, @@ -38,26 +37,6 @@ export function useTaskAutomationScript({ }, ); - /** - * Upload a new automation script to S3 - */ - const uploadScript = useCallback( - async (content: string) => { - const result = await taskAutomationApi.s3.uploadScript({ - orgId, - taskId, - content, - type: 'automation', - }); - - // Revalidate the cache - await mutate(); - - return result; - }, - [orgId, taskId, mutate], - ); - return { script: data, isLoading, @@ -65,7 +44,6 @@ export function useTaskAutomationScript({ error, mutate, refresh: mutate, - uploadScript, scriptExists: !isLoading && !error && !!data, }; } diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-scripts-list.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-scripts-list.ts deleted file mode 100644 index 692861392..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-scripts-list.ts +++ /dev/null @@ -1,44 +0,0 @@ -/** - * useTaskAutomationScriptsList Hook - * - * Lists all task automation scripts for a given organization. - * Provides automatic refresh and caching of the scripts list. - * - * @example - * ```tsx - * const { scripts, isLoading, refresh } = useTaskAutomationScriptsList({ - * orgId: 'org_123', - * refreshInterval: 15000 // 15 seconds - * }); - * ``` - */ - -import useSWR from 'swr'; -import { taskAutomationApi } from '../lib/task-automation-api'; -import type { - TaskAutomationScriptsListResponse, - UseTaskAutomationScriptsListOptions -} from '../lib/types'; - -export function useTaskAutomationScriptsList({ - orgId, - refreshInterval = 15000 -}: UseTaskAutomationScriptsListOptions) { - const { data, error, isLoading, mutate } = useSWR( - ['task-automation-scripts-list', orgId], - () => taskAutomationApi.s3.listScripts(orgId), - { - refreshInterval, - revalidateOnFocus: true, - }, - ); - - return { - scripts: data?.items || [], - count: data?.count || 0, - isLoading, - isError: !!error, - error, - refresh: mutate, - }; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-workflow.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-workflow.ts index 5515f98a1..a2f4e4552 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-workflow.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/hooks/use-task-automation-workflow.ts @@ -14,7 +14,11 @@ */ import { useCallback, useEffect, useState } from 'react'; -import type { TaskAutomationWorkflowStep, UseTaskAutomationWorkflowOptions } from '../lib/types'; +import type { + TaskAutomationWorkflow, + TaskAutomationWorkflowStep, + UseTaskAutomationWorkflowOptions, +} from '../lib/types'; import { taskAutomationApi } from '../lib/task-automation-api'; @@ -23,6 +27,10 @@ export function useTaskAutomationWorkflow({ enabled = true, }: UseTaskAutomationWorkflowOptions) { const [steps, setSteps] = 
useState([]); + const [title, setTitle] = useState(''); + const [integrationsUsed, setIntegrationsUsed] = useState< + TaskAutomationWorkflow['integrationsUsed'] + >([]); const [description, setDescription] = useState(''); const [isAnalyzing, setIsAnalyzing] = useState(false); const [error, setError] = useState(null); @@ -36,7 +44,9 @@ export function useTaskAutomationWorkflow({ try { // Call the AI-powered workflow analysis API - const result = await taskAutomationApi.workflow.analyzeWorkflow(content); + const result = (await taskAutomationApi.workflow.analyzeWorkflow( + content, + )) as TaskAutomationWorkflow; // Map the API response to our workflow steps format const steps: TaskAutomationWorkflowStep[] = result.steps.map((step, index) => ({ @@ -48,6 +58,8 @@ export function useTaskAutomationWorkflow({ })); setSteps(steps); + setTitle(result.title); + setIntegrationsUsed(result.integrationsUsed); setDescription('Automation workflow'); return { steps, description: 'Automation workflow' }; @@ -98,6 +110,8 @@ export function useTaskAutomationWorkflow({ return { steps, + integrationsUsed, + title, description, isAnalyzing, error, diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/chat-context.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/chat-context.tsx index 45a7d8e80..72ec20f7e 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/chat-context.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/chat-context.tsx @@ -1,6 +1,5 @@ 'use client'; -import { DataPart } from '@/ai/messages/data-parts'; import { Chat } from '@ai-sdk/react'; import { DataUIPart, DefaultChatTransport } from 'ai'; import { createContext, useContext, useMemo, useRef, type ReactNode } from 'react'; @@ -8,6 +7,7 @@ import { toast } from 'sonner'; import { mutate } from 'swr'; import { type ChatUIMessage } from '../components/chat/types'; import { useTaskAutomationDataMapper } from './task-automation-store'; +import { DataPart } from './types/data-parts'; interface ChatContextValue { chat: Chat; @@ -20,13 +20,16 @@ export function ChatProvider({ children }: { children: ReactNode }) { const mapDataToStateRef = useRef(mapDataToState); mapDataToStateRef.current = mapDataToState; + const baseUrl = process.env.NEXT_PUBLIC_ENTERPRISE_API_URL; + const url = `${baseUrl}/api/tasks-automations/chat`; + const chat = useMemo( () => new Chat({ transport: new DefaultChatTransport({ - api: '/api/tasks-automations/chat', + api: url, }), - onToolCall: () => mutate('/api/auth/info'), + onToolCall: () => mutate(`/api/auth/info`), onData: (data: DataUIPart) => mapDataToStateRef.current(data), onError: (error) => { toast.error(`Communication error with the AI: ${error.message}`); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-api.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-api.ts index 3e743d95c..11d8d73b8 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-api.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-api.ts @@ -2,94 +2,23 @@ * Task Automation API Client * * Provides a centralized API client for all task automation operations. - * Handles S3 operations, script execution, and workflow analysis. + * Uses server actions to securely call enterprise API with license key. 
*/ -import type { - TaskAutomationExecuteRequest, - TaskAutomationExecutionResult, - TaskAutomationScript, - TaskAutomationScriptsListResponse, - TaskAutomationUploadRequest, - TaskAutomationUploadResponse, -} from './types'; - -interface ApiError extends Error { - status?: number; - code?: string; -} - -/** - * Generic API client with error handling and response parsing - */ -class ApiClient { - private baseUrl: string; - - constructor(baseUrl = '') { - this.baseUrl = baseUrl; - } - - private async handleResponse(response: Response): Promise { - if (!response.ok) { - const error = await response.json().catch(() => ({ error: 'Unknown error' })); - const apiError = new Error(error.error || error.message || 'Request failed') as ApiError; - apiError.status = response.status; - apiError.code = error.code; - throw apiError; - } - - return response.json(); - } - - async get(endpoint: string, params?: Record): Promise { - // If endpoint is already a full URL or starts with /, use it directly - const url = - endpoint.startsWith('http') || endpoint.startsWith('/') - ? new URL(endpoint, window.location.origin) - : new URL(endpoint, this.baseUrl || window.location.origin); - - if (params) { - Object.entries(params).forEach(([key, value]) => { - url.searchParams.append(key, value); - }); - } - - const response = await fetch(url.toString(), { - method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, - }); - - return this.handleResponse(response); - } - - async post(endpoint: string, data?: any): Promise { - // Construct proper URL - const url = - endpoint.startsWith('http') || endpoint.startsWith('/') - ? endpoint - : `${this.baseUrl || ''}${endpoint}`; - - const response = await fetch(url, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: data ? 
JSON.stringify(data) : undefined, - }); - - return this.handleResponse(response); - } -} - -// Create a singleton instance -const apiClient = new ApiClient(); +import { + analyzeAutomationWorkflow, + executeAutomationScript, + getAutomationRunStatus, + getAutomationScript, + listAutomationScripts, + uploadAutomationScript, +} from '../actions/task-automation-actions'; +import type { TaskAutomationExecuteRequest, TaskAutomationUploadRequest } from './types'; /** * Task Automation API * - * All API operations related to task automation + * All operations use server actions to securely call enterprise API */ export const taskAutomationApi = { /** @@ -100,22 +29,37 @@ export const taskAutomationApi = { * Get a specific automation script from S3 * @param key - The S3 key (format: orgId/taskId.js) */ - getScript: (key: string) => - apiClient.get('/api/tasks-automations/s3/get', { key }), + getScript: async (key: string) => { + const result = await getAutomationScript(key); + if (!result.success) { + throw new Error(result.error || 'Failed to get script'); + } + return result.data; + }, /** * List all automation scripts for an organization * @param orgId - The organization ID */ - listScripts: (orgId: string) => - apiClient.get('/api/tasks-automations/s3/list', { orgId }), + listScripts: async (orgId: string) => { + const result = await listAutomationScripts(orgId); + if (!result.success) { + throw new Error(result.error || 'Failed to list scripts'); + } + return result.data; + }, /** * Upload a new automation script to S3 * @param data - Upload request data */ - uploadScript: (data: TaskAutomationUploadRequest) => - apiClient.post('/api/tasks-automations/s3/upload', data), + uploadScript: async (data: TaskAutomationUploadRequest) => { + const result = await uploadAutomationScript(data); + if (!result.success) { + throw new Error(result.error || 'Failed to upload script'); + } + return result.data; + }, }, /** @@ -123,17 +67,28 @@ export const taskAutomationApi = { */ execution: { /** - * Execute an automation script via Trigger.dev + * Execute an automation script * @param data - Execution request data */ - executeScript: (data: TaskAutomationExecuteRequest) => - apiClient.post('/api/tasks-automations/trigger/execute', data), + executeScript: async (data: TaskAutomationExecuteRequest) => { + const result = await executeAutomationScript(data); + if (!result.success) { + throw new Error(result.error || 'Failed to execute script'); + } + return result.data; + }, /** - * Get run status - * @param runId - The Trigger.dev run ID + * Get run status - Enterprise only + * @param runId - The enterprise run ID */ - getRunStatus: (runId: string) => apiClient.get(`/api/tasks-automations/runs/${runId}`), + getRunStatus: async (runId: string) => { + const result = await getAutomationRunStatus(runId); + if (!result.success) { + throw new Error(result.error || 'Failed to get run status'); + } + return result.data; + }, }, /** @@ -144,14 +99,12 @@ export const taskAutomationApi = { * Analyze a script to extract workflow steps * @param scriptContent - The script content to analyze */ - analyzeWorkflow: (scriptContent: string) => - apiClient.post<{ - steps: Array<{ - title: string; - description: string; - type: string; - iconType: string; - }>; - }>('/api/tasks-automations/workflow/analyze', { scriptContent }), + analyzeWorkflow: async (scriptContent: string) => { + const result = await analyzeAutomationWorkflow(scriptContent); + if (!result.success) { + throw new Error(result.error || 'Failed to analyze 
workflow'); + } + return result.data; + }, }, }; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-store.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-store.ts index 85892b970..09f832a22 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-store.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/task-automation-store.ts @@ -5,10 +5,10 @@ * Manages chat status, script generation state, and data mapping. */ -import type { DataPart } from '@/ai/messages/data-parts'; import type { ChatStatus, DataUIPart } from 'ai'; import { create } from 'zustand'; import type { TaskAutomationStoreState, ViewMode } from './types'; +import type { DataPart } from './types/data-parts'; interface TaskAutomationStore extends TaskAutomationStoreState { setChatStatus: (status: ChatStatus) => void; diff --git a/apps/app/src/ai/messages/data-parts.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/data-parts.ts similarity index 100% rename from apps/app/src/ai/messages/data-parts.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/data-parts.ts diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/index.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/index.ts index fb4bb43a2..eec4fea7e 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/index.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/index.ts @@ -71,9 +71,16 @@ export interface TaskAutomationWorkflowStep { iconType: WorkflowIconType; } +type IntegrationsUsed = Array<{ + name: string; + link: string; +}>; + export interface TaskAutomationWorkflow { steps: TaskAutomationWorkflowStep[]; + title: string; description: string; + integrationsUsed: IntegrationsUsed; } // ============================================================================ diff --git a/apps/app/src/ai/messages/metadata.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/metadata.ts similarity index 100% rename from apps/app/src/ai/messages/metadata.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/types/metadata.ts diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/use-local-storage-value.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/use-local-storage-value.ts deleted file mode 100644 index 4de24383a..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/use-local-storage-value.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { useEffect, useState } from 'react' - -export function useLocalStorageValue(key: string) { - const [value, setValue] = useState('') - const [isInitialized, setIsInitialized] = useState(false) - - useEffect(() => { - const storedValue = localStorage.getItem(key) - if (storedValue !== null) { - setValue(storedValue) - } - setIsInitialized(true) - }, [key]) - - useEffect(() => { - if (isInitialized) { - localStorage.setItem(key, value) - } - }, [key, value, isInitialized]) - - return [value, setValue] as const -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/utils.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/utils.ts deleted file mode 100644 index fed2fe91e..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/lib/utils.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { clsx, type ClassValue } from 'clsx' -import { twMerge } from 
'tailwind-merge' - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/logs.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/logs.tsx deleted file mode 100644 index 1ba3c1fad..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/logs.tsx +++ /dev/null @@ -1,9 +0,0 @@ -'use client'; - -import { CommandsLogs } from './components/commands-logs/commands-logs'; -import { useSandboxStore } from './state'; - -export function Logs(props: { className?: string }) { - const { commands } = useSandboxStore(); - return ; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/page.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/page.tsx index 56f489779..4d76078eb 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/page.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/page.tsx @@ -1,3 +1,5 @@ +import { db } from '@db'; +import { redirect } from 'next/navigation'; import { AutomationLayoutWrapper } from './automation-layout-wrapper'; import { AutomationPageClient } from './components/AutomationPageClient'; @@ -8,10 +10,23 @@ export default async function Page({ }) { const { taskId, orgId } = await params; + const task = await db.task.findUnique({ + where: { + id: taskId, + organizationId: orgId, + }, + }); + + if (!task) { + redirect('/tasks'); + } + + const taskName = task.title; + return (
- +
); diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/preview.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/preview.tsx deleted file mode 100644 index 2e1903117..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/preview.tsx +++ /dev/null @@ -1,20 +0,0 @@ -'use client'; - -import { Preview as PreviewComponent } from './components/preview/preview'; -import { useSandboxStore } from './state'; - -interface Props { - className?: string; -} - -export function Preview({ className }: Props) { - const { status, url, urlUUID } = useSandboxStore(); - return ( - - ); -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/script-initializer.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/script-initializer.tsx deleted file mode 100644 index e9558c207..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/script-initializer.tsx +++ /dev/null @@ -1,25 +0,0 @@ -'use client'; - -import { useEffect } from 'react'; -import { useTaskAutomationStore } from './lib/task-automation-store'; -import { useTaskAutomationScript } from './hooks'; - -interface Props { - orgId: string; - taskId: string; -} - -export function ScriptInitializer({ orgId, taskId }: Props) { - const { setScriptGenerated } = useTaskAutomationStore(); - const { script, scriptExists } = useTaskAutomationScript({ orgId, taskId }); - - useEffect(() => { - if (scriptExists && script) { - // Script exists, mark it as generated - setScriptGenerated(true, script.key); - console.log('[ScriptInitializer] Found existing script:', script.key); - } - }, [scriptExists, script, setScriptGenerated]); - - return null; -} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/state.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/state.ts deleted file mode 100644 index f06788654..000000000 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/state.ts +++ /dev/null @@ -1,153 +0,0 @@ -import type { DataPart } from '@/ai/messages/data-parts'; -import type { ChatStatus, DataUIPart } from 'ai'; -import { useMemo } from 'react'; -import { create } from 'zustand'; -import type { Command, CommandLog } from './components/commands-logs/types'; -import { useMonitorState } from './components/error-monitor/state'; - -interface SandboxStore { - addGeneratedFiles: (files: string[]) => void; - addLog: (data: { sandboxId: string; cmdId: string; log: CommandLog }) => void; - addPaths: (paths: string[]) => void; - chatStatus: ChatStatus; - clearGeneratedFiles: () => void; - commands: Command[]; - generatedFiles: Set; - paths: string[]; - sandboxId?: string; - setChatStatus: (status: ChatStatus) => void; - setSandboxId: (id: string) => void; - setStatus: (status: 'running' | 'stopped') => void; - setUrl: (url: string, uuid: string) => void; - status?: 'running' | 'stopped'; - upsertCommand: (command: Omit) => void; - url?: string; - urlUUID?: string; -} - -function getBackgroundCommandErrorLines(commands: Command[]) { - return commands - .flatMap(({ command, args, background, logs = [] }) => - logs.map((log) => ({ command, args, background, ...log })), - ) - .sort((logA, logB) => logA.timestamp - logB.timestamp) - .filter((log) => log.stream === 'stderr' && log.background); -} - -export function useCommandErrorsLogs() { - const { commands } = useSandboxStore(); - const errors = useMemo(() => getBackgroundCommandErrorLines(commands), [commands]); - return { errors }; -} - -export const useSandboxStore = create()((set) => ({ - 
addGeneratedFiles: (files) => - set((state) => ({ - generatedFiles: new Set([...state.generatedFiles, ...files]), - })), - addLog: (data) => { - set((state) => { - const idx = state.commands.findIndex((c) => c.cmdId === data.cmdId); - if (idx === -1) { - console.warn(`Command with ID ${data.cmdId} not found.`); - return state; - } - const updatedCmds = [...state.commands]; - updatedCmds[idx] = { - ...updatedCmds[idx], - logs: [...(updatedCmds[idx].logs ?? []), data.log], - }; - return { commands: updatedCmds }; - }); - }, - addPaths: (paths) => set((state) => ({ paths: [...new Set([...state.paths, ...paths])] })), - chatStatus: 'ready', - clearGeneratedFiles: () => set(() => ({ generatedFiles: new Set() })), - commands: [], - generatedFiles: new Set(), - paths: [], - setChatStatus: (status) => - set((state) => (state.chatStatus === status ? state : { chatStatus: status })), - setSandboxId: (sandboxId) => - set((state) => ({ - sandboxId, - status: 'running', - commands: [], - paths: state.paths || [], // Preserve existing paths - url: undefined, - generatedFiles: new Set(), - })), - setStatus: (status) => set(() => ({ status })), - setUrl: (url, urlUUID) => set(() => ({ url, urlUUID })), - upsertCommand: (cmd) => { - set((state) => { - const existingIdx = state.commands.findIndex((c) => c.cmdId === cmd.cmdId); - const idx = existingIdx !== -1 ? existingIdx : state.commands.length; - const prev = state.commands[idx] ?? { startedAt: Date.now(), logs: [] }; - const cmds = [...state.commands]; - cmds[idx] = { ...prev, ...cmd }; - return { commands: cmds }; - }); - }, -})); - -interface FileExplorerStore { - paths: string[]; - addPath: (path: string) => void; -} - -export const useFileExplorerStore = create()((set) => ({ - paths: [], - addPath: (path) => { - set((state) => { - if (!state.paths.includes(path)) { - return { paths: [...state.paths, path] }; - } - return state; - }); - }, -})); - -export function useDataStateMapper() { - const { addPaths, setSandboxId, setUrl, upsertCommand, addGeneratedFiles } = useSandboxStore(); - const { errors } = useCommandErrorsLogs(); - const { setCursor } = useMonitorState(); - - return (data: DataUIPart) => { - switch (data.type) { - case 'data-create-sandbox': - if (data.data.sandboxId) { - setSandboxId(data.data.sandboxId); - } - break; - case 'data-generating-files': - if (data.data.status === 'uploaded') { - setCursor(errors.length); - addPaths(data.data.paths); - addGeneratedFiles(data.data.paths); - } - break; - case 'data-run-command': - if ( - data.data.commandId && - (data.data.status === 'executing' || data.data.status === 'running') - ) { - upsertCommand({ - background: data.data.status === 'running', - sandboxId: data.data.sandboxId, - cmdId: data.data.commandId, - command: data.data.command, - args: data.data.args, - }); - } - break; - case 'data-get-sandbox-url': - if (data.data.url) { - setUrl(data.data.url, crypto.randomUUID()); - } - break; - default: - break; - } - }; -} diff --git a/apps/app/src/ai/tools/exa-search.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/exa-search.ts similarity index 100% rename from apps/app/src/ai/tools/exa-search.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/exa-search.ts diff --git a/apps/app/src/ai/tools/firecrawl.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/firecrawl.ts similarity index 100% rename from apps/app/src/ai/tools/firecrawl.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/firecrawl.ts diff --git 
a/apps/app/src/ai/gateway.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/gateway.ts similarity index 67% rename from apps/app/src/ai/gateway.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/gateway.ts index c73c09384..744e1b144 100644 --- a/apps/app/src/ai/gateway.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/gateway.ts @@ -2,7 +2,6 @@ import { createGatewayProvider } from '@ai-sdk/gateway'; import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; import type { LanguageModelV2 } from '@ai-sdk/provider'; import type { JSONValue } from 'ai'; -import { Models } from './constants'; export async function getAvailableModels() { const gateway = gatewayInstance(); @@ -21,22 +20,17 @@ export function getModelOptions( options?: { reasoningEffort?: 'minimal' | 'low' | 'medium' }, ): ModelOptions { const gateway = gatewayInstance(); - if (modelId === Models.OpenAIGPT5 || modelId === Models.OpenAIGPT5Mini) { - return { - model: gateway(modelId), - providerOptions: { - openai: { - include: ['reasoning.encrypted_content'], - reasoningEffort: options?.reasoningEffort ?? 'low', - reasoningSummary: 'auto', - serviceTier: 'priority', - } satisfies OpenAIResponsesProviderOptions, - }, - }; - } return { model: gateway(modelId), + providerOptions: { + openai: { + include: ['reasoning.encrypted_content'], + reasoningEffort: options?.reasoningEffort ?? 'low', + reasoningSummary: 'auto', + serviceTier: 'priority', + } satisfies OpenAIResponsesProviderOptions, + }, }; } diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/deferred.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/deferred.ts new file mode 100644 index 000000000..f6bcec13b --- /dev/null +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/deferred.ts @@ -0,0 +1,24 @@ +export class Deferred { + private resolveFn: (value: T | PromiseLike) => void = () => {} + private rejectFn: (reason?: any) => void = () => {} + private _promise: Promise + + constructor() { + this._promise = new Promise((resolve, reject) => { + this.resolveFn = resolve + this.rejectFn = reject + }) + } + + get promise() { + return this._promise + } + + resolve(value: T | PromiseLike): void { + this.resolveFn(value) + } + + reject(reason?: any): void { + this.rejectFn(reason) + } +} diff --git a/apps/app/src/ai/tools/generate-files/get-contents.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/get-contents.ts similarity index 96% rename from apps/app/src/ai/tools/generate-files/get-contents.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/get-contents.ts index d1aa6cec7..d04f1d034 100644 --- a/apps/app/src/ai/tools/generate-files/get-contents.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/generate-files/get-contents.ts @@ -1,7 +1,7 @@ -import { getModelOptions } from '@/ai/gateway'; import { streamObject, type ModelMessage } from 'ai'; import z from 'zod/v3'; -import { Deferred } from '../../../app/(app)/[orgId]/tasks/[taskId]/automation/lib/deferred'; +import { getModelOptions } from '../../tools/gateway'; +import { Deferred } from './deferred'; export type File = z.infer; diff --git a/apps/app/src/ai/tools/prompt-for-info.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/prompt-for-info.ts similarity index 100% rename from apps/app/src/ai/tools/prompt-for-info.ts rename to 
apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/prompt-for-info.ts diff --git a/apps/app/src/ai/tools/prompt-for-secret.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/prompt-for-secret.ts similarity index 100% rename from apps/app/src/ai/tools/prompt-for-secret.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/prompt-for-secret.ts diff --git a/apps/app/src/ai/tools/store-to-s3.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/store-to-s3.ts similarity index 98% rename from apps/app/src/ai/tools/store-to-s3.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/store-to-s3.ts index 7b2775418..f2fbd791f 100644 --- a/apps/app/src/ai/tools/store-to-s3.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/store-to-s3.ts @@ -3,7 +3,7 @@ import { PutObjectCommand } from '@aws-sdk/client-s3'; import type { UIMessage, UIMessageStreamWriter } from 'ai'; import { tool } from 'ai'; import z from 'zod/v3'; -import type { DataPart } from '../messages/data-parts'; +import type { DataPart } from '../lib/types/data-parts'; interface Params { writer: UIMessageStreamWriter>; diff --git a/apps/app/src/ai/tools/task-automation-tools.ts b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/task-automation-tools.ts similarity index 95% rename from apps/app/src/ai/tools/task-automation-tools.ts rename to apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/task-automation-tools.ts index 7a1ada6f9..519e67e73 100644 --- a/apps/app/src/ai/tools/task-automation-tools.ts +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/automation/tools/task-automation-tools.ts @@ -6,7 +6,7 @@ */ import type { InferUITools, UIMessage, UIMessageStreamWriter } from 'ai'; -import type { DataPart } from '../messages/data-parts'; +import type { DataPart } from '../lib/types/data-parts'; import { exaSearchTool } from './exa-search'; import { firecrawlTool } from './firecrawl'; import { promptForInfoTool } from './prompt-for-info'; diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/SingleTask.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/SingleTask.tsx index 0981043e7..2fe387777 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/SingleTask.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/SingleTask.tsx @@ -76,7 +76,7 @@ export function SingleTask({ task, members }: SingleTaskProps) { }; return ( -
+
{/* Main Content Layout */}
{/* Left Column - Title, Description, Content */} diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskPropertiesSidebar.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskPropertiesSidebar.tsx index 849a07cc0..1389cfa28 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskPropertiesSidebar.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/components/TaskPropertiesSidebar.tsx @@ -3,7 +3,7 @@ import { Badge } from '@comp/ui/badge'; import { Button } from '@comp/ui/button'; import type { Control, Departments, Member, Task, TaskFrequency, TaskStatus, User } from '@db'; import { format } from 'date-fns'; -import { Sparkles } from 'lucide-react'; +import { Code } from 'lucide-react'; import Link from 'next/link'; import { useFeatureFlagEnabled } from 'posthog-js/react'; import { useState } from 'react'; @@ -271,8 +271,8 @@ export function TaskPropertiesSidebar({ className="w-full bg-primary text-primary-foreground hover:bg-primary/90" size="sm" > - - AI Automation + + Integration Builder
diff --git a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/tasks/[taskId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/tests/dashboard/actions/run-tests.ts b/apps/app/src/app/(app)/[orgId]/tests/dashboard/actions/run-tests.ts index fa8de72cc..378d56534 100644 --- a/apps/app/src/app/(app)/[orgId]/tests/dashboard/actions/run-tests.ts +++ b/apps/app/src/app/(app)/[orgId]/tests/dashboard/actions/run-tests.ts @@ -1,8 +1,7 @@ 'use server'; -import { sendIntegrationResults } from '@/jobs/tasks/integration/integration-results'; +import { runIntegrationTests } from '@/jobs/tasks/integration/run-integration-tests'; import { auth } from '@/utils/auth'; -import { db } from '@db'; import { tasks } from '@trigger.dev/sdk'; import { revalidatePath } from 'next/cache'; import { headers } from 'next/headers'; @@ -27,54 +26,29 @@ export const runTests = async () => { }; } - const integrations = await db.integration.findMany({ - where: { + try { + const handle = await tasks.trigger('run-integration-tests', { organizationId: orgId, - integrationId: { - in: ['aws', 'gcp', 'azure'], - }, - }, - select: { - id: true, - name: true, - integrationId: true, - settings: true, - userSettings: true, - organization: { - select: { - id: true, - name: true, - }, - }, - }, - }); + }); + + const headersList = await headers(); + let path = + headersList.get("x-pathname") || headersList.get("referer") || ""; + path = path.replace(/\/[a-z]{2}\//, "/"); + + revalidatePath(path); - if (!integrations) { + return { + success: true, + errors: null, + taskId: handle.id, + }; + } catch (error) { + console.error('Error triggering integration tests:', error); + return { success: false, - errors: ['No integrations found'], + errors: [error instanceof Error ? error.message : 'Failed to trigger integration tests'], }; } - - const batchHandle = await tasks.batchTriggerAndWait( - 'send-integration-results', - integrations.map((integration) => ({ - payload: { - integration: { - id: integration.id, - name: integration.name, - integration_id: integration.integrationId, - settings: integration.settings, - user_settings: integration.userSettings, - organization: integration.organization, - }, - }, - })), - ); - - revalidatePath(`/${orgId}/tests/dashboard`); - return { - success: true, - errors: null, - }; }; diff --git a/apps/app/src/app/(app)/[orgId]/tests/dashboard/loading.tsx b/apps/app/src/app/(app)/[orgId]/tests/dashboard/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/tests/dashboard/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/tests/dashboard/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/tests/loading.tsx b/apps/app/src/app/(app)/[orgId]/tests/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/tests/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/tests/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/vendors/(overview)/loading.tsx b/apps/app/src/app/(app)/[orgId]/vendors/(overview)/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/vendors/(overview)/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/vendors/(overview)/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/vendors/[vendorId]/loading.tsx b/apps/app/src/app/(app)/[orgId]/vendors/[vendorId]/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/vendors/[vendorId]/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/vendors/[vendorId]/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/[orgId]/vendors/backup-overview/loading.tsx b/apps/app/src/app/(app)/[orgId]/vendors/backup-overview/loading.tsx index 9c774022b..4f38f9a92 100644 --- a/apps/app/src/app/(app)/[orgId]/vendors/backup-overview/loading.tsx +++ b/apps/app/src/app/(app)/[orgId]/vendors/backup-overview/loading.tsx @@ -1,3 +1,9 @@ import Loader from '@/components/ui/loader'; -export default Loader; +export default function Loading() { + return ( +
+ +
+ ); +} diff --git a/apps/app/src/app/(app)/onboarding/components/PostPaymentOnboarding.tsx b/apps/app/src/app/(app)/onboarding/components/PostPaymentOnboarding.tsx index 80fa16566..29fa8f756 100644 --- a/apps/app/src/app/(app)/onboarding/components/PostPaymentOnboarding.tsx +++ b/apps/app/src/app/(app)/onboarding/components/PostPaymentOnboarding.tsx @@ -94,31 +94,38 @@ export function PostPaymentOnboarding({
- {!isLoading && step && ( - + {!isLoading && ( + - ( - - - - -
- -
-
- )} - /> + {steps.map((s, idx) => ( +
+ ( + + + + +
+ +
+
+ )} + /> +
+ ))} )} diff --git a/apps/app/src/app/(app)/onboarding/hooks/usePostPaymentOnboarding.ts b/apps/app/src/app/(app)/onboarding/hooks/usePostPaymentOnboarding.ts index 11df27210..08943bcd9 100644 --- a/apps/app/src/app/(app)/onboarding/hooks/usePostPaymentOnboarding.ts +++ b/apps/app/src/app/(app)/onboarding/hooks/usePostPaymentOnboarding.ts @@ -77,11 +77,6 @@ export function usePostPaymentOnboarding({ defaultValues: { [step.key]: savedAnswers[step.key] || '' }, }); - // Reset form when step changes - useEffect(() => { - form.reset({ [step.key]: savedAnswers[step.key] || '' }); - }, [savedAnswers, step.key, form]); - // Track onboarding start useEffect(() => { trackEvent('onboarding_started', { diff --git a/apps/app/src/app/api/send-policy-email/route.ts b/apps/app/src/app/api/send-policy-email/route.ts new file mode 100644 index 000000000..ad8457774 --- /dev/null +++ b/apps/app/src/app/api/send-policy-email/route.ts @@ -0,0 +1,50 @@ +import { NextResponse, type NextRequest } from 'next/server'; +import { Novu } from '@novu/api'; + +export async function POST(request: NextRequest) { + let events; + try { + events = await request.json(); + } catch (error) { + return NextResponse.json( + { success: false, error: 'Invalid JSON in request body' }, + { status: 400 } + ); + } + + // You may want to validate required fields in the body here + // For now, we just pass the whole body to Novu + + const novuApiKey = process.env.NOVU_API_KEY; + if (!novuApiKey) { + return NextResponse.json( + { success: false, error: 'Novu API key not configured' }, + { status: 500 } + ); + } + + const novu = new Novu({ secretKey: novuApiKey }); + + try { + const result = await novu.triggerBulk({ + events: events.map((event: any) => ({ + workflowId: "new-policy-email", + to: { + subscriberId: event.subscriberId, + email: event.email, + }, + payload: event, + })), + }); + + return NextResponse.json({ success: true, result }); + } catch (error) { + return NextResponse.json( + { + success: false, + error: error instanceof Error ? error.message : 'Failed to trigger notification', + }, + { status: 500 } + ); + } +} diff --git a/apps/app/src/app/api/tasks-automations/chat/automation-prompt.md b/apps/app/src/app/api/tasks-automations/chat/automation-prompt.md deleted file mode 100644 index 458e67fd2..000000000 --- a/apps/app/src/app/api/tasks-automations/chat/automation-prompt.md +++ /dev/null @@ -1,187 +0,0 @@ -You are an Automation Assistant that creates Lambda automation scripts. - -# WORKFLOW - -When a user requests automation, ALWAYS follow this workflow: - -1. **RESPOND WITH EXPLANATION FIRST**: Start by explaining what you understand and what you plan to do -2. **THEN REQUEST INFORMATION**: If you need additional information, call the appropriate tool AFTER your explanation -3. **GENERATE LAMBDA SCRIPT**: Create the automation script following the rules below - -## IMPORTANT: Response Order - -- ALWAYS start with text explaining your understanding -- THEN call tools if you need more information -- Tools should come AFTER your explanation, not before - -**ULTIMATUM - NON-NEGOTIABLE REQUIREMENT**: -The file MUST start with `module.exports = async (event) => {` on line 1. -ABSOLUTELY NO CODE can exist outside this function. Not a single line. Not even comments. - -## Lambda Authoring Contract (STRICT) - -You MUST adhere to ALL of the following. If any are violated, REGENERATE the file to comply. 
- -- File begins at character 1 with: `module.exports = async (event) => {` -- No comments, whitespace, `require`, `import`, constants, or variables before this line -- No code after the closing `};` — the export closes the file -- All helper functions must be defined INSIDE the exported function as inner functions -- All `require(...)` statements must be INSIDE the exported function (dynamic require is fine) -- Do NOT use `process.env` — use `await getSecret(orgId, name)` inside the function -- JavaScript only (.js). No TypeScript syntax. No top‑level imports -- Return a JSON-serializable object `{ ok: boolean, ... }` - -## Lambda Critical Rules: - -1. **SINGLE FUNCTION EXPORT**: Every file must export exactly ONE function using `module.exports = async (event) => { ... }` -2. **NO HELPER FUNCTIONS**: ALL logic must be written inline within the exported function -3. **NO PROCESS.ENV**: NEVER use `process.env` for secrets. Use only the provided `getSecret` function -4. **JAVASCRIPT ONLY**: Write in JavaScript (.js), NOT TypeScript -5. **NO IMPORTS FOR HOST APIS**: The host provides `getSecret` and `fetch` - do NOT import these - -### Available Runtime Globals (DO NOT import/require these) - -Use these directly. They are injected by the runtime and MUST NOT be imported or required: - -- `console` -- `Buffer` -- `fetch` (via `globalThis.fetch`) -- `URL` -- `URLSearchParams` -- `AbortController` -- `setTimeout` -- `clearTimeout` - -### Networking Rules (MANDATORY) - -- Use the provided global `fetch` for ALL network calls -- NEVER use `https`, `http`, `node:https`, `node:http`, `axios`, `node-fetch`, or any other HTTP client -- Construct URLs with `URL`/`URLSearchParams` where helpful - -## Lambda File Format: - -```javascript -module.exports = async (event) => { - try { - const orgId = event?.orgId; - - // Example of allowed dynamic require inside the function - // Do not import https/http/axios. Use global fetch instead. - - // Helper functions must be nested inside - function ghRequest(path, { method = 'GET' } = {}) { - // ... - } - - // Your automation logic here - - return { ok: true, result: 'Success' }; - } catch (e) { - return { ok: false, error: e?.message || 'Unknown error' }; - } -}; -``` - -### DISALLOWED (WILL BE REJECTED) - -```javascript -// ❌ No leading comments -const https = require('https'); // ❌ No top-level requires - -module.exports = async (event) => { - /* ... */ -}; -``` - -## Verification Checklist (MANDATORY) - -Before finalizing, verify ALL of the following: - -1. The first non-whitespace characters of the file are exactly: `module.exports = async (event) => {` -2. There is NO code, comments, imports, or variables outside the exported function (before or after) -3. No `process.env`; credentials come from `getSecret` -4. JavaScript only (.js), no TypeScript syntax -5. No top-level `require` or `import` calls -6. Do not import/require provided globals (`console`, `Buffer`, `fetch`, `URL`, `URLSearchParams`, `AbortController`, `setTimeout`, `clearTimeout`) -7. No usage of `https`, `http`, `node:https`, `node:http`, `axios`, `node-fetch`; only use global `fetch` - -## Automation Workflow: - -1. Generate the automation script based on requirements -2. Use the `storeToS3` tool to save the automation -3. Use actual organization and task IDs from ACTUAL_VALUES_JSON -4. Send a final message with: - - Brief confirmation that automation was created - - Quick breakdown of what the automation does (2-3 bullet points) - - What data it will return/report -5. 
NEVER mention technical details like Lambda, S3, file paths, or code in your responses - -# AVAILABLE TOOLS - -- Tool: `storeToS3` -- Saves automation scripts directly to S3 -- Automatically handles all metadata and validation - -# IMPORTANT NOTES - -1. **NEVER** mix multiple concerns in the same script -2. **DO NOT** paste ANY code in chat unless the user EXPLICITLY asks to see it -3. **REMEMBER** platform credentials are managed by the system, not user-provided - -# RESPONSE FORMAT - -ALWAYS follow this order in your responses: - -1. **START WITH TEXT**: Explain what you understand and what the automation will do (in user-friendly terms) -2. **THEN USE TOOLS**: If you need information, call tools AFTER your explanation -3. **FINAL CONFIRMATION**: Confirm what the automation does, NOT technical details - -## User-Friendly Communication: - -**DO SAY:** - -- "I'll create an automation that checks if Dependabot is enabled" -- "Your automation will query GitHub and report back the results" -- "✓ Created your automation - it will check Dependabot status" - -**DON'T SAY:** - -- "I'll create a Lambda function" ❌ -- "Uploading script to S3" ❌ -- "Created automation script and saved to S3" ❌ -- Any mention of file paths, code, Lambda, or infrastructure ❌ - -## Example Response Pattern: - -**CORRECT:** - -``` -I'll create an automation that checks your GitHub repository for Dependabot configuration. To do this, I need some details about your repository. - -[THEN call promptForInfo tool] -``` - -**WRONG:** - -``` -I'll create a Lambda that calls the GitHub REST API and save it to S3. -``` - -## Final Confirmation Message Format: - -After creating the automation, send a message like this: - -``` -✓ Created your automation! Here's what it does: - -• Connects to your GitHub repository using your GITHUB_TOKEN -• Checks if Dependabot is enabled by looking for .github/dependabot.yml -• Scans vulnerability alerts settings in the repository configuration -• Returns a report showing Dependabot status and any configuration details found - -You can now test this automation to see it in action. -``` - -# Example Interaction - -User: "I need to automatically download invoices from our vendor portal every month" diff --git a/apps/app/src/app/api/tasks-automations/chat/prompt.md b/apps/app/src/app/api/tasks-automations/chat/prompt.md deleted file mode 100644 index 5cf5365f0..000000000 --- a/apps/app/src/app/api/tasks-automations/chat/prompt.md +++ /dev/null @@ -1,473 +0,0 @@ -You are an AWS Lambda Function Assistant. Your sole purpose is to help users create automation scripts that will be saved and executed as AWS Lambda functions. - -**ULTIMATUM - NON-NEGOTIABLE REQUIREMENT**: -The file MUST start with `module.exports = async (event) => {` on line 1. -ABSOLUTELY NO CODE can exist outside this function. Not a single line. Not even comments. -If you write ANYTHING before `module.exports` or after the closing `};`, the task WILL FAIL. - -CRITICAL RULES: - -1. **SINGLE FUNCTION EXPORT**: Every file must export exactly ONE function using `module.exports = async (event) => { ... }` -2. **NO HELPER FUNCTIONS**: ALL logic must be written inline within the exported function. Do NOT create any additional functions, classes, or helper methods. -3. **NO PROCESS.ENV**: NEVER use `process.env` for secrets. Use only the provided `getSecret` function. -4. **JAVASCRIPT ONLY**: Write in JavaScript (.js), NOT TypeScript. -5. **NO IMPORTS FOR HOST APIS**: The host provides `getSecret` and `fetch` - do NOT import these. -6. 
**NO REQUIRES OUTSIDE**: Do NOT write any `require()` statements outside the function. If you need to require something, do it INSIDE the function body. - -# Execution Environment - -- **Runtime**: Trigger.dev Node.js environment (executes automation scripts) -- **Function signature**: `module.exports = async (event) => { ... }` -- **What's in `event`**: - - `event.orgId`: The organization ID (used with getSecret) - - That's it! NO user-provided values are in event -- **Available globals** (provided by the execution environment): - - `getSecret(orgId, secretName)`: Async function to retrieve secrets - - `orgId`: Organization ID (from event.orgId) - - `secretName`: Secret name (e.g., 'GITHUB_TOKEN', 'AWS_ACCESS_KEY_ID') - MUST be from AVAILABLE_SECRETS - - `fetch`: Standards-compliant HTTP client - - `console`, `Buffer`, `URL`, `URLSearchParams`, `AbortController`, `setTimeout`, `clearTimeout` -- **Forbidden**: - - Using `process.env` for any secrets - - Importing AWS SDK to fetch secrets - - Creating helper functions or classes - - Using TypeScript - - Multiple exports - -## 🚨 CRITICAL: User Values are NOT in Event! - -**The `event` parameter ONLY contains `orgId`. Nothing else!** - -When users provide information (via promptForInfo), you must HARDCODE those values in the script: - -```javascript -// ✅ CORRECT: Hardcode user-provided values -const githubOrg = 'microsoft'; // User said their org is "microsoft" -const repoName = 'vscode'; // User said their repo is "vscode" -const region = 'us-east-1'; // User said their AWS region - -// ❌ WRONG: These will be undefined! -const githubOrg = event.githubOrg; // NO! event doesn't have this -const repoName = event.repoName; // NO! event doesn't have this -const region = event.region; // NO! event doesn't have this -``` - -**Only secrets should use getSecret(). Everything else gets hardcoded!** - -## 🎯 IMPORTANT: Use APIs, NOT SDKs! - -**ALWAYS prefer direct API calls over SDKs:** - -- ✅ Use `fetch()` with REST/HTTP APIs -- ❌ Do NOT install or import SDKs (aws-sdk, @octokit/rest, etc.) -- ❌ Do NOT use npm packages for API clients - -**Why APIs over SDKs:** - -- Smaller code footprint -- No dependency management -- Direct control over requests -- Better for serverless environments -- Always up-to-date with your research - -**Example:** - -```javascript -// ✅ GOOD: Direct API call -const response = await fetch('https://api.service.com/v1/resource', { - headers: { Authorization: `Bearer ${token}` }, -}); - -// ❌ BAD: Using SDK -// const AWS = require('aws-sdk'); // NEVER DO THIS! 
-``` - -# Required Function Format - -**THE FILE MUST START EXACTLY LIKE THIS (NO EXCEPTIONS):** - -```javascript -module.exports = async (event) => { - try { - // ALL code must be inside this function - // Do NOT create helper functions - // Do NOT write ANYTHING above the module.exports line - // Do NOT write ANYTHING after the closing }; - // getSecret and fetch are available as globals - const orgId = event?.orgId; - - // Example: fetching a secret (getSecret is a global) - // const token = await getSecret(orgId, 'GITHUB_TOKEN'); - - // Example: making HTTP request (fetch is a global) - // const response = await fetch('https://api.example.com', { - // headers: { 'Authorization': `Bearer ${token}` } - // }); - - return { ok: true, result: 'your result here' }; - } catch (e) { - return { ok: false, error: e?.message || 'Unknown error' }; - } -}; -// NOTHING CAN BE WRITTEN AFTER THIS LINE -``` - -# Secret Management - -**IMPORTANT**: Handle secrets appropriately based on availability: - -1. When user requests an automation that needs secrets (API tokens, credentials, etc.): - - Check AVAILABLE_SECRETS to see if the required secrets are configured - - If a required secret doesn't exist, use the `promptForSecret` tool - -2. Using the `promptForSecret` tool: - - Call this tool when you need a secret that isn't in AVAILABLE_SECRETS - - Provide clear information about why the secret is needed - - Include example values when appropriate (e.g., format of API keys) - - After calling the tool, wait for the user to respond that they've added the secret - - **IMPORTANT: Secret Description Requirements** - - Always provide a comprehensive description that includes: - - What the secret is for (e.g., "Personal access token for GitHub API") - - Required permissions/scopes (e.g., "Required scopes: repo, read:org, workflow") - - Any special configuration needed (e.g., "Must be generated from Settings > Developer settings > Personal access tokens") - - Expiration considerations (e.g., "Recommend setting expiration to 90 days") - - The description will be saved in the database for future reference - - Be specific about the minimum required permissions to follow the principle of least privilege - - Example usage: - - ``` - If the user wants GitHub automation but GITHUB_TOKEN is not in AVAILABLE_SECRETS: - - Use promptForSecret with: - - secretName: "GITHUB_TOKEN" - - description: "Personal access token for GitHub API. Required scopes: repo (full control), read:org (read org membership), read:user (read user profile data). Generate from Settings > Developer settings > Personal access tokens > Fine-grained tokens." - - category: "api_keys" - - exampleValue: "ghp_xxxxxxxxxxxxxxxxxxxx" - - reason: "This token is required to authenticate with the GitHub API and access repository information" - ``` - - More examples: - - ``` - AWS Credentials: - - secretName: "AWS_ACCESS_KEY_ID" - - description: "AWS access key ID for programmatic access. Required permissions: S3 read/write, CloudWatch logs read. Create from IAM console with minimal required permissions." - - category: "authentication" - - OpenAI API: - - secretName: "OPENAI_API_KEY" - - description: "OpenAI API key for GPT models. Requires active billing. Usage tier determines rate limits. Monitor usage to control costs." - - category: "api_keys" - - Slack Webhook: - - secretName: "SLACK_WEBHOOK_URL" - - description: "Slack incoming webhook URL for posting messages. Scoped to specific channel. Create from Slack App settings > Incoming Webhooks." 
- - category: "integration" - ``` - -3. After the user adds the secret: - - They will tell you they've added it - - You can then proceed with creating the automation using that secret - - The secret will be available via `getSecret(orgId, 'SECRET_NAME')` - -# Information Gathering - -**🚨 CRITICAL REQUIREMENT: ALWAYS ASK FOR SPECIFIC TARGETS! 🚨** - -**NEVER ASSUME** which organization, team, project, repository, account, or resource the user wants to target! - -Most APIs require specific identifiers: - -- GitHub: organization/username AND repository name -- AWS: account ID, region, bucket names, etc. -- Slack: workspace ID, channel ID -- Vercel: team ID, project name -- Azure/GCP: project ID, resource names - -**MANDATORY**: If the user hasn't explicitly provided these identifiers, you MUST use `promptForInfo` to ask for them BEFORE writing any automation code. - -1. **Common scenarios requiring user input**: - - "Check our GitHub repo" → Ask: Which organization? Which repository? - - "List our AWS resources" → Ask: Which AWS account? Which region? - - "Post to Slack" → Ask: Which channel? Which workspace? - - "Deploy to Vercel" → Ask: Which team? Which project? - - "Check our database" → Ask: Which database? Which table? - -2. Using the `promptForInfo` tool: - - ``` - Example 1: User says "Check our GitHub repo for security issues" - - WRONG: Assume it's the user's personal repo or company repo - RIGHT: Use promptForInfo to ask for: - - GitHub organization/username - - Repository name - - Specific branch (optional, can default to main) - - Example 2: User says "Send a Slack notification" - - WRONG: Send to #general or any assumed channel - RIGHT: Use promptForInfo to ask for: - - Slack channel ID or name - - Message format preferences - - Example 3: User says "List our AWS S3 buckets" - - WRONG: List all buckets in default region - RIGHT: Use promptForInfo to ask for: - - AWS region(s) to check - - Any specific bucket name patterns to filter - - Example 4: User says "Check Vercel deployment status" - - WRONG: Check personal account or assume project - RIGHT: Use promptForInfo to ask for: - - Vercel team ID or slug - - Project name - - Specific deployment ID (if checking one deployment) - ``` - -3. **Best practices**: - - Always ask for the most specific identifier possible - - Provide clear examples in placeholders - - Explain why you need each piece of information - - If unsure what to ask for, research the API first to understand required parameters - -4. After the user provides information: - - They will submit the form with the values - - You'll receive the information in a formatted message - - **HARDCODE these values directly in the automation script** - - Do NOT try to access them from `event` - they won't be there! - - Use these exact values - don't modify or assume variations - - Example: User provides GitHub org "microsoft" and repo "vscode" - - ```javascript - // ✅ CORRECT: Hardcode the values - const githubOrg = 'microsoft'; - const repoName = 'vscode'; - - // ❌ WRONG: Don't try to get from event - const githubOrg = event.githubOrg; // This won't work! - ``` - -# Workflow - -1. When user requests an automation: - - **FIRST**: Check if specific targets are provided (org/team/project/repo/etc.) 
- - **If ANY target information is missing**: IMMEDIATELY use `promptForInfo` to ask - - Do NOT proceed until you have specific identifiers - - Examples: "our repo" needs org + repo name, "our Slack" needs channel ID - - **THEN**: Check if all other required information is provided - - If additional information is missing, use `promptForInfo` to gather it - - **IF THE AUTOMATION USES ANY EXTERNAL API**: - - STOP! Research the API first using `exaSearch` and `firecrawl` - - Get the latest documentation before writing any code - - Verify endpoints, authentication, and API versions - - Check secret availability (see Secret Management section) - - If secrets are missing, use `promptForSecret` to request them - - Generate the automation script based on requirements - - Store the script directly to S3 using the `storeToS3` tool - - Use the actual organization and task IDs from ACTUAL_VALUES_JSON - - Reply with brief confirmation only - no file paths or technical details - - Do NOT paste ANY code in chat unless the user EXPLICITLY asks to see it - - Reply with ONLY "✓ Created automation script and saved to S3" - -2. S3 Storage Details: - - Key: `{ORG_ID}/{TASK_ID}.automation.js` (from ACTUAL_VALUES_JSON) - - The storeToS3 tool will handle all metadata automatically - -# S3 Metadata Requirements - -- ContentType: `application/javascript` -- User metadata: - - runtime: `nodejs20.x` - - handler: `task-fn` - - language: `javascript` - - entry: `task.js` - - packaging: `task-fn` - -# Web Research Tools - -You have access to powerful web research tools: - -1. **exaSearch** - Use this to search the web for relevant information: - - Neural search finds semantically similar content beyond keywords - - Categories: general, company, research_paper, news, github, etc. - - Can filter by date range for recent information - - Returns high-quality, relevant results - -2. **firecrawl** - Use this to extract content from specific web pages: - - Extracts clean markdown or HTML from any website - - Handles JavaScript-rendered sites - - Removes navigation, ads, and other noise - - Perfect for reading documentation, articles, or API docs - -**WORKFLOW**: First use exaSearch to find relevant URLs, then use firecrawl to extract the full content. - -# API Usage Guidelines - -**🚨 CRITICAL: MANDATORY API RESEARCH REQUIREMENT 🚨** - -YOU MUST RESEARCH THE LATEST API DOCUMENTATION BEFORE WRITING ANY CODE! - -**ALWAYS USE REST APIs DIRECTLY - NEVER USE SDKs!** - -Before writing ANY automation that uses an external API (GitHub, AWS, Slack, etc.), you are REQUIRED to: - -1. **RESEARCH FIRST** - Use exaSearch to find the official REST API documentation - - Example: `exaSearch("GitHub REST API latest documentation")` - - Example: `exaSearch("AWS S3 REST API latest documentation")` - - Example: `exaSearch("Slack Web API current authentication methods")` - - Focus on REST/HTTP API docs, NOT SDK documentation - -2. **EXTRACT DOCUMENTATION** - Use firecrawl on the documentation URLs - - This ensures you have the CURRENT API information - - APIs change frequently - your training data may be outdated! - -3. **VERIFY THESE CRITICAL DETAILS**: - - **API Version**: What's the current version? - - **Base URLs**: Are you using the correct endpoints? - - **Authentication**: Bearer tokens? API keys? OAuth? Basic auth? - - **Headers**: What headers are required? Content-Type? Accept? User-Agent? - - **Rate Limits**: What are the current rate limits? - - **Response Format**: JSON? XML? Has the schema changed? - -4. 
**COMMON PITFALLS TO AVOID**: - - ❌ Using SDKs instead of direct API calls - - ❌ Using old API versions from training data - - ❌ Using deprecated endpoints - - ❌ Wrong authentication headers - - ❌ Outdated request/response formats - - ❌ Installing npm packages for API clients - - ✅ Always use fetch() with REST APIs! - - ✅ Always research first, then code! - -**Example Research Workflow**: - -```javascript -// Step 1: Search for documentation -await exaSearch('GitHub REST API authentication latest documentation'); - -// Step 2: Extract the documentation -await firecrawl('https://docs.github.com/en/rest/authentication'); - -// Step 3: Search for specific endpoints -await exaSearch('GitHub API create issue endpoint latest'); - -// Step 4: Extract endpoint details -await firecrawl('https://docs.github.com/en/rest/issues/issues#create-an-issue'); - -// NOW you can write the automation with confidence! -``` - -**REMEMBER**: The user expects their automation to work with TODAY'S APIs, not outdated versions! - -# Common Patterns (ALL INLINE) - -**REMEMBER: This MUST be the ENTIRE file content. Line 1 starts with module.exports:** - -```javascript -module.exports = async (event) => { - try { - const orgId = event?.orgId; - if (!orgId) throw new Error('orgId required'); - - // HARDCODE all user-provided values (they're NOT in event!) - const teamId = 'comp-ai'; // User told us their team ID - const projectName = 'my-project'; // User told us their project - const slackChannel = '#notifications'; // User told us their channel - - // Pattern 1: Using secrets (getSecret is a global function) - const token = await getSecret(orgId, 'YOUR_SECRET_NAME'); - - // Pattern 2: Making API calls (ALL inline, no helper functions!) - // 🚨 IMPORTANT: This is just an example structure! - // You MUST use exaSearch + firecrawl to research the current API first! - // DO NOT copy these headers/endpoints - they're just placeholders! - const repoResponse = await fetch('https://api.example.com/endpoint', { - headers: { - Authorization: `Bearer ${token}`, - // ⚠️ RESEARCH REQUIRED: Check current API docs for required headers - }, - }); - - if (!repoResponse.ok) { - throw new Error(`API error: ${repoResponse.status}`); - } - - const responseData = await repoResponse.json(); - - // Pattern 3: Processing data (inline, no helper functions!) 
- const results = []; - // ⚠️ RESEARCH REQUIRED: API response structure varies by service - // Use exaSearch + firecrawl to understand the current response format - const additionalDataResponse = await fetch('https://api.example.com/additional-endpoint', { - headers: { - Authorization: `Bearer ${token}`, - // ⚠️ Headers must match current API requirements - }, - }); - - if (additionalDataResponse.ok) { - const additionalData = await additionalDataResponse.json(); - // Process inline - do NOT create a separate function - // ⚠️ RESEARCH REQUIRED: Data structure depends on the specific API - for (const item of additionalData) { - // Process according to current API response structure - results.push({ - // Map fields based on actual API documentation - }); - } - } - - return { - ok: true, - // ⚠️ Return structure should match what the user needs - // Research the API to understand available data - results: results, - }; - } catch (e) { - return { ok: false, error: e?.message || 'Unknown error' }; - } -}; -``` - -# Autofix Policy - -If existing code violates these rules, automatically fix WITHOUT asking: - -- `process.env` usage → Replace with `await getSecret(event.orgId, 'SECRET_NAME')` (getSecret is a global) -- Wrong export format → Convert to `module.exports = async (event) => { ... }` -- Helper functions → Inline all logic into the main function -- TypeScript → Convert to JavaScript - -# Available Secrets - -You will receive AVAILABLE_SECRETS (array of secret names) with all configured secrets. To access a secret: - -1. Check if the secret name exists in AVAILABLE_SECRETS -2. Use the exact secret name from the list -3. Call getSecret with the orgId and secret name - -Example: - -- GitHub Token: `await getSecret(orgId, 'GITHUB_TOKEN')` -- AWS Access Key: `await getSecret(orgId, 'AWS_ACCESS_KEY_ID')` -- API Key: `await getSecret(orgId, 'EXTERNAL_API_KEY')` - -# Critical Reminders - -1. **LINE 1 MUST BE**: `module.exports = async (event) => {` -2. **NOTHING** before line 1 - no comments, no requires, no imports, NOTHING -3. **NOTHING** after the final `};` - the file ends there -4. **EVERYTHING** goes inside the single exported function -5. **NO** helper functions, even small ones -6. **NO** `process.env` - use the global `getSecret` function -7. **NO** importing fetch or AWS SDK - they are provided as globals -8. **ALWAYS** return `{ ok: boolean, ... }` -9. **ALWAYS** handle errors with try/catch - -**FINAL WARNING**: If you write even a single character before `module.exports` or after the closing `};`, the Lambda will FAIL to execute. The file must contain ONLY the exported function, nothing else. 
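For reference, a minimal sketch of a script that would satisfy the authoring contract the deleted prompt above enforces. The GitHub org, repo name, and GITHUB_TOKEN secret below are hypothetical placeholders (the kind of values a user would supply via promptForInfo / promptForSecret), not values taken from this PR:

```javascript
module.exports = async (event) => {
  try {
    const orgId = event?.orgId;
    if (!orgId) throw new Error('orgId required');

    // Hypothetical values a user would have supplied via promptForInfo.
    const githubOrg = 'acme-co';
    const repoName = 'example-repo';

    // Secrets come from the injected getSecret global, never process.env.
    const token = await getSecret(orgId, 'GITHUB_TOKEN');
    if (!token) throw new Error('GITHUB_TOKEN is not configured');

    // Only the global fetch is used; no https/axios/node-fetch.
    const res = await fetch(
      `https://api.github.com/repos/${githubOrg}/${repoName}/contents/.github/dependabot.yml`,
      {
        headers: {
          Authorization: `Bearer ${token}`,
          Accept: 'application/vnd.github+json',
        },
      },
    );

    // 404 simply means the config file is absent, not a failure.
    if (res.status === 404) {
      return { ok: true, dependabotConfigured: false };
    }
    if (!res.ok) {
      throw new Error(`GitHub API error: ${res.status}`);
    }

    return { ok: true, dependabotConfigured: true };
  } catch (e) {
    return { ok: false, error: e?.message || 'Unknown error' };
  }
};
```

Everything stays inside the single exported function, secrets come from the injected getSecret global, and network calls go through the global fetch, which is exactly the checklist the prompt spells out.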
diff --git a/apps/app/src/app/api/tasks-automations/chat/route.ts b/apps/app/src/app/api/tasks-automations/chat/route.ts deleted file mode 100644 index 6cd0f4d6a..000000000 --- a/apps/app/src/app/api/tasks-automations/chat/route.ts +++ /dev/null @@ -1,136 +0,0 @@ -import { Models } from '@/ai/constants'; -import { getAvailableModels, getModelOptions } from '@/ai/gateway'; -import { getTaskAutomationTools } from '@/ai/tools/task-automation-tools'; -import { db } from '@db'; -import { - convertToModelMessages, - createUIMessageStream, - createUIMessageStreamResponse, - stepCountIs, - streamText, -} from 'ai'; -import { checkBotId } from 'botid/server'; -import { NextResponse } from 'next/server'; -import { type ChatUIMessage } from '../../../(app)/[orgId]/tasks/[taskId]/automation/components/chat/types'; -import automationPrompt from './automation-prompt.md'; -import lambdaPrompt from './prompt.md'; - -// Ensure sufficient time for long-running automation orchestration -export const runtime = 'nodejs'; -export const maxDuration = 300; // seconds (Vercel max) - -interface BodyData { - messages: ChatUIMessage[]; - modelId?: string; - reasoningEffort?: 'low' | 'medium'; - orgId: string; - taskId: string; -} - -export async function POST(req: Request) { - const { isBot } = await checkBotId(); - if (isBot) { - return NextResponse.json( - { error: 'Bot is not allowed to access this endpoint' }, - { status: 401 }, - ); - } - - const [models, { messages, modelId = Models.OpenAIGPT5Mini, reasoningEffort, orgId, taskId }] = - await Promise.all([getAvailableModels(), req.json() as Promise]); - - const model = models.find((m) => m.id === modelId); - - if (!model) { - return NextResponse.json( - { - error: `Model ${modelId} not found., Valid models are: ${models.map((m) => m.id).join(', ')}`, - }, - { status: 400 }, - ); - } - - // Validate required parameters - if (!orgId || !taskId) { - return NextResponse.json( - { error: 'Missing required parameters: orgId and taskId' }, - { status: 400 }, - ); - } - - // Fetch available integrations and their secrets - // Get all configured secrets for the organization - const secrets = await db.secret.findMany({ - where: { - organizationId: orgId, - }, - select: { - name: true, - category: true, - description: true, - }, - }); - - // Build list of available secret names - const availableSecrets = secrets.map((s) => s.name); - - const actualValuesJson = JSON.stringify({ - ORG_ID: orgId, - TASK_ID: taskId, - AVAILABLE_SECRETS: availableSecrets, - }); - // Include Lambda prompt content. - // markdown loaded via webpack asset/source (string) - const fullPromptContext = `\n${lambdaPrompt}\n\n---\n`; - - const prompt = `${automationPrompt}\n\nFULL_PROMPT_CONTEXT:\n${fullPromptContext}\n\nACTUAL_VALUES_JSON:\n${actualValuesJson}`; - - return createUIMessageStreamResponse({ - stream: createUIMessageStream({ - originalMessages: messages, - execute: ({ writer }) => { - const result = streamText({ - ...getModelOptions(modelId, { reasoningEffort }), - system: prompt, - messages: convertToModelMessages( - messages.map((message) => { - message.parts = message.parts.map((part) => { - if (part.type === 'data-report-errors') { - return { - type: 'text', - text: - `There are errors in the generated code. This is the summary of the errors we have:\n` + - `\`\`\`${part.data.summary}\`\`\`\n` + - (part.data.paths?.length - ? 
`The following files may contain errors:\n` + - `\`\`\`${part.data.paths?.join('\n')}\`\`\`\n` - : '') + - `Fix the errors reported.`, - }; - } - return part; - }); - return message; - }), - ), - stopWhen: stepCountIs(20), - tools: getTaskAutomationTools({ writer, modelId }), - onError: (error) => { - console.error('Error communicating with AI'); - console.error(JSON.stringify(error, null, 2)); - }, - }); - result.consumeStream(); - writer.merge( - result.toUIMessageStream({ - sendReasoning: true, - sendStart: false, - messageMetadata: () => ({ - model: model.name, - }), - }), - ); - }, - }), - }); -} diff --git a/apps/app/src/app/api/tasks-automations/errors/prompt.md b/apps/app/src/app/api/tasks-automations/errors/prompt.md deleted file mode 100644 index 6ce671e7e..000000000 --- a/apps/app/src/app/api/tasks-automations/errors/prompt.md +++ /dev/null @@ -1,34 +0,0 @@ -You are an expert software engineer reviewing `stderr` logs from a development sandbox. Your task is to detect **actionable errors that require code fixes**. - -### Analysis Rules - -- Identify **real errors, failures, and critical issues** that block functionality. -- **Ignore** duplicate errors already seen in recent logs. -- **IMPORTANT**: If the same error appears multiple times (e.g., repeated Babel plugin errors, repeated module resolution errors), treat it as a single error, not multiple errors. -- **DO NOT** report errors that have already been attempted to fix in the previous conversation turns. -- Distinguish **actionable errors** from non-critical output (info messages, debug logs, expected warnings, server startup noise). -- Consider typical development issues: - - Build/compilation errors - - Runtime exceptions - - Dependency or module resolution issues - - Syntax/typing errors -- Exclude minor warnings that do not break functionality. -- If an error has been reported and fix attempted but the error persists, consider it as already handled and DO NOT report it again. - -### Output Format - -- If actionable errors are found: - - `shouldBeFixed=true` - - Provide a **clear, technical summary** including: - - Error type(s) - - Relevant file(s) or component(s) - - Specific failure reasons - - Key log snippets for context -- If no actionable errors are found: - - `shouldBeFixed=false` - - Summary must be empty - -### Requirements - -- Be **precise, concise, and actionable** — the summary will be consumed by another AI to generate fixes. -- Only include **errors that must be fixed**; do not output noise. 
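The error-analysis route deleted next consumes this prompt with generateObject and a resultSchema imported from the error-monitor components. The real schema is defined elsewhere in the codebase; based on the output format described above it is presumably something like this sketch, where the field names are an assumption rather than copied from the source:

```javascript
// Assumed shape only: the actual resultSchema lives in
// .../automation/components/error-monitor/schemas and may differ.
const { z } = require('zod');

const resultSchema = z.object({
  // True only when the logs contain an actionable error that needs a code fix.
  shouldBeFixed: z.boolean(),
  // Technical summary for the fixing model; empty when shouldBeFixed is false.
  summary: z.string(),
});

module.exports = { resultSchema };
```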
diff --git a/apps/app/src/app/api/tasks-automations/errors/route.ts b/apps/app/src/app/api/tasks-automations/errors/route.ts deleted file mode 100644 index 250e873b4..000000000 --- a/apps/app/src/app/api/tasks-automations/errors/route.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { Models } from '@/ai/constants'; -import { generateObject } from 'ai'; -import { checkBotId } from 'botid/server'; -import { NextResponse } from 'next/server'; -import { - linesSchema, - resultSchema, -} from '../../../(app)/[orgId]/tasks/[taskId]/automation/components/error-monitor/schemas'; -import prompt from './prompt.md'; - -export async function POST(req: Request) { - const { isBot } = await checkBotId(); - if (isBot) { - return NextResponse.json( - { error: 'Bot is not allowed to access this endpoint' }, - { status: 401 }, - ); - } - - const body = await req.json(); - const parsedBody = linesSchema.safeParse(body); - if (!parsedBody.success) { - return NextResponse.json({ error: `Invalid request` }, { status: 400 }); - } - - const result = await generateObject({ - system: prompt, - model: Models.OpenAIGPT5Mini, - providerOptions: { - openai: { - include: ['reasoning.encrypted_content'], - reasoningEffort: 'minimal', - reasoningSummary: 'auto', - serviceTier: 'priority', - }, - }, - messages: [{ role: 'user', content: JSON.stringify(parsedBody.data) }], - schema: resultSchema, - }); - - return NextResponse.json(result.object, { - status: 200, - }); -} diff --git a/apps/app/src/app/api/tasks-automations/lambda/functions/route.ts b/apps/app/src/app/api/tasks-automations/lambda/functions/route.ts deleted file mode 100644 index 56e09f008..000000000 --- a/apps/app/src/app/api/tasks-automations/lambda/functions/route.ts +++ /dev/null @@ -1,70 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { GetObjectCommand, ListObjectsV2Command } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -const DEFAULTS = { - region: 'us-east-1', -}; - -export async function GET(req: Request) { - try { - const url = new URL(req.url); - const orgId = url.searchParams.get('orgId'); - const bucket = process.env.TASKS_AUTOMATION_BUCKET; - const region = url.searchParams.get('region') || DEFAULTS.region; - const taskId = url.searchParams.get('taskId'); - - const res = await s3Client.send( - new ListObjectsV2Command({ Bucket: bucket, Prefix: `${orgId}/` }), - ); - const items = (res.Contents || []) - .map((o) => ({ key: o.Key!, size: o.Size ?? 
0, lastModified: o.LastModified })) - .filter((o) => o.key && o.key.endsWith('.js')); - - // Optional: if taskId provided, also return its text content inline - let content: string | undefined; - if (taskId) { - try { - const key = `${orgId}/${taskId}.js`; - console.log(`[S3 API] Fetching object: ${bucket}/${key}`); - const obj = await s3Client.send(new GetObjectCommand({ Bucket: bucket, Key: key })); - const body = await obj.Body?.transformToString('utf-8'); - content = body; - console.log(`[S3 API] Fetched content:`, { - key, - contentLength: body?.length, - firstLine: body?.split('\n')[0], - lastModified: obj.LastModified, - etag: obj.ETag, - }); - } catch (error: any) { - if (error.Code === 'NoSuchKey') { - console.log(`[S3 API] Key not found: ${orgId}/${taskId}.js`); - return NextResponse.json({ error: 'Function not found' }, { status: 404 }); - } - throw error; - } - } - - return NextResponse.json( - { - bucket, - region, - orgId, - items, - content, - taskId: taskId || undefined, - }, - { - headers: { - 'Cache-Control': 'no-store, no-cache, must-revalidate, proxy-revalidate', - Pragma: 'no-cache', - Expires: '0', - }, - }, - ); - } catch (error) { - console.error('Error listing S3 objects', error); - return NextResponse.json({ error: 'Failed to list functions' }, { status: 500 }); - } -} diff --git a/apps/app/src/app/api/tasks-automations/lambda/invoke-with-logs/route.ts b/apps/app/src/app/api/tasks-automations/lambda/invoke-with-logs/route.ts deleted file mode 100644 index 8c61c57c3..000000000 --- a/apps/app/src/app/api/tasks-automations/lambda/invoke-with-logs/route.ts +++ /dev/null @@ -1,88 +0,0 @@ -import { InvokeCommand, LambdaClient } from '@aws-sdk/client-lambda'; -import { Sandbox } from '@vercel/sandbox'; -import { NextResponse } from 'next/server'; - -export const runtime = 'nodejs'; - -export async function POST(req: Request) { - try { - const { orgId, taskId, sandboxId } = await req.json(); - - if (!orgId || !taskId || !sandboxId) { - return NextResponse.json({ error: 'Missing required parameters' }, { status: 400 }); - } - - // Get AWS credentials - const credentials = - process.env.APP_AWS_ACCESS_KEY_ID && process.env.APP_AWS_SECRET_ACCESS_KEY - ? 
{ - accessKeyId: process.env.APP_AWS_ACCESS_KEY_ID, - secretAccessKey: process.env.APP_AWS_SECRET_ACCESS_KEY, - ...(process.env.APP_AWS_SESSION_TOKEN && { - sessionToken: process.env.APP_AWS_SESSION_TOKEN, - }), - } - : undefined; - - // Invoke the Lambda - const lambda = new LambdaClient({ - region: process.env.APP_AWS_REGION || 'us-east-1', - credentials, - }); - const invokeCommand = new InvokeCommand({ - FunctionName: 'automated-tasks', - Payload: JSON.stringify({ orgId, taskId }), - }); - - const invokeResult = await lambda.send(invokeCommand); - const payloadText = new TextDecoder().decode(invokeResult.Payload); - let result; - - try { - result = JSON.parse(payloadText); - } catch { - result = { raw: payloadText }; - } - - // Get the sandbox and create a command to show the output - const sandbox = await Sandbox.get({ sandboxId }); - - // Create a simple script that just outputs the result - const outputScript = ` -console.log('AWS Lambda Invocation Result:'); -console.log('Function: automated-tasks'); -console.log('Payload: ${JSON.stringify({ orgId, taskId })}'); -console.log('\\n--- Output ---'); -console.log(${JSON.stringify(JSON.stringify(result, null, 2))}); -`; - - // Write the script - await sandbox.writeFiles([ - { path: 'lambda-output.js', content: Buffer.from(outputScript, 'utf8') }, - ]); - - // Run it to generate logs - const cmd = await sandbox.runCommand({ - detached: true, - cmd: 'node', - args: ['lambda-output.js'], - }); - - return NextResponse.json({ - ok: true, - cmdId: cmd.cmdId, - command: 'node', - args: ['lambda-output.js'], - sandboxId, - result, - statusCode: invokeResult.StatusCode, - functionError: invokeResult.FunctionError, - }); - } catch (error) { - console.error('Error invoking Lambda:', error); - return NextResponse.json( - { error: 'Failed to invoke Lambda', details: (error as Error)?.message }, - { status: 500 }, - ); - } -} diff --git a/apps/app/src/app/api/tasks-automations/lambda/invoke/route.ts b/apps/app/src/app/api/tasks-automations/lambda/invoke/route.ts deleted file mode 100644 index bb4cb813b..000000000 --- a/apps/app/src/app/api/tasks-automations/lambda/invoke/route.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { InvokeCommand, LambdaClient } from '@aws-sdk/client-lambda'; -import { NextResponse } from 'next/server'; - -const DEFAULTS = { - functionName: 'automated-tasks', - region: 'us-east-1', - orgId: 'org_689ce3dced87cc45f600a04b', - taskId: 'tsk_689ce3dd6f19f4cf1f0ea061', -}; - -export const runtime = 'nodejs'; - -export async function POST(req: Request) { - try { - const body = (await req.json()) as { - orgId?: string; - taskId?: string; - region?: string; - functionName?: string; - }; - const orgId = body.orgId || DEFAULTS.orgId; - const taskId = body.taskId || DEFAULTS.taskId; - const region = body.region || DEFAULTS.region; - const functionName = body.functionName || DEFAULTS.functionName; - - const credentials = - process.env.APP_AWS_ACCESS_KEY_ID && process.env.APP_AWS_SECRET_ACCESS_KEY - ? { - accessKeyId: process.env.APP_AWS_ACCESS_KEY_ID as string, - secretAccessKey: process.env.APP_AWS_SECRET_ACCESS_KEY as string, - } - : undefined; - - const lambda = new LambdaClient({ - region: region || process.env.APP_AWS_REGION || 'us-east-1', - credentials, - }); - const resp = await lambda.send( - new InvokeCommand({ - FunctionName: functionName, - Payload: new TextEncoder().encode(JSON.stringify({ orgId, taskId })), - }), - ); - - const payloadStr = resp.Payload ? 
new TextDecoder().decode(resp.Payload) : ''; - - return NextResponse.json({ - functionName, - region, - orgId, - taskId, - statusCode: resp.StatusCode, - executedVersion: resp.ExecutedVersion, - payload: payloadStr, - }); - } catch (error) { - console.error('Error invoking Lambda', error); - return NextResponse.json({ error: 'Failed to invoke Lambda' }, { status: 500 }); - } -} diff --git a/apps/app/src/app/api/tasks-automations/lambda/upload/route.ts b/apps/app/src/app/api/tasks-automations/lambda/upload/route.ts deleted file mode 100644 index 7e8da6ce4..000000000 --- a/apps/app/src/app/api/tasks-automations/lambda/upload/route.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { PutObjectCommand } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -export async function POST(req: Request) { - try { - const { orgId, taskId, content }: { orgId: string; taskId: string; content: string } = - await req.json(); - if (!orgId || !taskId || typeof content !== 'string') { - return NextResponse.json({ error: 'Missing orgId, taskId or content' }, { status: 400 }); - } - - const resolvedBucket = process.env.TASKS_AUTOMATION_BUCKET; - const key = `${orgId}/${taskId}.js`; - - await s3Client.send( - new PutObjectCommand({ - Bucket: resolvedBucket, - Key: key, - Body: Buffer.from(content, 'utf8'), - ContentType: 'application/javascript; charset=utf-8', - Metadata: { - runtime: 'nodejs20.x', - handler: 'task-fn', - language: 'javascript', - entry: 'task.js', - packaging: 'task-fn', - filename: key, - }, - }), - ); - - return NextResponse.json({ ok: true, bucket: resolvedBucket, key }); - } catch (error) { - console.error('Error uploading code to S3', error); - return NextResponse.json({ error: 'Failed to upload to S3' }, { status: 500 }); - } -} diff --git a/apps/app/src/app/api/tasks-automations/models/route.tsx b/apps/app/src/app/api/tasks-automations/models/route.tsx deleted file mode 100644 index 92cde0839..000000000 --- a/apps/app/src/app/api/tasks-automations/models/route.tsx +++ /dev/null @@ -1,10 +0,0 @@ -import { SUPPORTED_MODELS } from '@/ai/constants' -import { getAvailableModels } from '@/ai/gateway' -import { NextResponse } from 'next/server' - -export async function GET() { - const allModels = await getAvailableModels() - return NextResponse.json({ - models: allModels.filter((model) => SUPPORTED_MODELS.includes(model.id)), - }) -} diff --git a/apps/app/src/app/api/tasks-automations/runs/[runId]/route.ts b/apps/app/src/app/api/tasks-automations/runs/[runId]/route.ts deleted file mode 100644 index a31a01469..000000000 --- a/apps/app/src/app/api/tasks-automations/runs/[runId]/route.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { runs } from '@trigger.dev/sdk'; -import { NextRequest, NextResponse } from 'next/server'; - -export async function GET( - request: NextRequest, - { params }: { params: Promise<{ runId: string }> }, -) { - try { - const { runId } = await params; - - // Get the run status from Trigger.dev - const run = await runs.retrieve(runId); - - if (!run) { - return NextResponse.json({ error: 'Run not found' }, { status: 404 }); - } - - // Log the output if the run is completed - if (run.status === 'COMPLETED' && run.output) { - console.log(`[Automation Execution] Run ${runId} completed with output:`, run.output); - } else if (run.status === 'FAILED') { - console.error(`[Automation Execution] Run ${runId} failed with error:`, run.error); - } - - // Return the run status and output - return NextResponse.json({ - id: run.id, - status: 
run.status, - output: run.output, - error: run.error, - createdAt: run.createdAt, - isCompleted: run.isCompleted, - }); - } catch (error) { - console.error('Error fetching run status:', error); - return NextResponse.json({ error: 'Failed to fetch run status' }, { status: 500 }); - } -} diff --git a/apps/app/src/app/api/tasks-automations/s3/get/route.ts b/apps/app/src/app/api/tasks-automations/s3/get/route.ts deleted file mode 100644 index 625dd1118..000000000 --- a/apps/app/src/app/api/tasks-automations/s3/get/route.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { GetObjectCommand } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -export const runtime = 'nodejs'; - -export async function GET(req: Request) { - try { - const { searchParams } = new URL(req.url); - const key = searchParams.get('key'); - - if (!key) { - return NextResponse.json({ error: 'Missing key parameter' }, { status: 400 }); - } - - // Get object from S3 - const { Body } = await s3Client.send( - new GetObjectCommand({ - Bucket: process.env.TASKS_AUTOMATION_BUCKET, - Key: key, - }), - ); - - if (!Body) { - return NextResponse.json({ error: 'Script not found' }, { status: 404 }); - } - - const content = await Body.transformToString(); - - return NextResponse.json({ - success: true, - content, - key, - }); - } catch (error) { - console.error('Error fetching from S3:', error); - return NextResponse.json( - { error: 'Failed to fetch script', details: (error as Error)?.message }, - { status: 500 }, - ); - } -} diff --git a/apps/app/src/app/api/tasks-automations/s3/list/route.ts b/apps/app/src/app/api/tasks-automations/s3/list/route.ts deleted file mode 100644 index a6bb41c2f..000000000 --- a/apps/app/src/app/api/tasks-automations/s3/list/route.ts +++ /dev/null @@ -1,45 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { ListObjectsV2Command } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -export const runtime = 'nodejs'; - -export async function GET(req: Request) { - try { - const { searchParams } = new URL(req.url); - const orgId = searchParams.get('orgId'); - - if (!orgId) { - return NextResponse.json({ error: 'Missing orgId parameter' }, { status: 400 }); - } - - // List objects in the organization's folder - const response = await s3Client.send( - new ListObjectsV2Command({ - Bucket: process.env.TASKS_AUTOMATION_BUCKET, - Prefix: `${orgId}/`, - MaxKeys: 100, - }), - ); - - const items = (response.Contents || []) - .filter((item) => item.Key?.endsWith('.js')) - .map((item) => ({ - key: item.Key!, - lastModified: item.LastModified, - size: item.Size, - })); - - return NextResponse.json({ - success: true, - items, - count: items.length, - }); - } catch (error) { - console.error('Error listing S3 objects:', error); - return NextResponse.json( - { error: 'Failed to list scripts', details: (error as Error)?.message }, - { status: 500 }, - ); - } -} diff --git a/apps/app/src/app/api/tasks-automations/s3/upload/route.ts b/apps/app/src/app/api/tasks-automations/s3/upload/route.ts deleted file mode 100644 index 9e3267dfb..000000000 --- a/apps/app/src/app/api/tasks-automations/s3/upload/route.ts +++ /dev/null @@ -1,50 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { PutObjectCommand } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -export const runtime = 'nodejs'; - -export async function POST(req: Request) { - try { - const { orgId, taskId, content, type } = await req.json(); - - if (!orgId || !taskId || 
!content) { - return NextResponse.json({ error: 'Missing required parameters' }, { status: 400 }); - } - - // Determine the S3 key based on the type - const s3Key = type === 'lambda' ? `${orgId}/${taskId}.js` : `${orgId}/${taskId}.${type}.js`; - const bucket = process.env.TASKS_AUTOMATION_BUCKET; - - // Upload to S3 - await s3Client.send( - new PutObjectCommand({ - Bucket: bucket, - Key: s3Key, - Body: content, - ContentType: 'application/javascript', - Metadata: { - orgId, - taskId, - type: type || 'lambda', - uploadedAt: new Date().toISOString(), - }, - }), - ); - - console.log(`Successfully uploaded ${s3Key} to S3`); - - return NextResponse.json({ - success: true, - bucket: bucket, - key: s3Key, - message: 'Script uploaded successfully', - }); - } catch (error) { - console.error('Error uploading to S3:', error); - return NextResponse.json( - { error: 'Failed to upload script to S3', details: (error as Error)?.message }, - { status: 500 }, - ); - } -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/logs/route.ts b/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/logs/route.ts deleted file mode 100644 index 9d1d3cb7e..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/logs/route.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { NextResponse, type NextRequest } from 'next/server' -import { Sandbox } from '@vercel/sandbox' - -interface Params { - sandboxId: string - cmdId: string -} - -export async function GET( - _request: NextRequest, - { params }: { params: Promise } -) { - const logParams = await params - const encoder = new TextEncoder() - const sandbox = await Sandbox.get(logParams) - const command = await sandbox.getCommand(logParams.cmdId) - - return new NextResponse( - new ReadableStream({ - async pull(controller) { - for await (const logline of command.logs()) { - controller.enqueue( - encoder.encode( - JSON.stringify({ - data: logline.data, - stream: logline.stream, - timestamp: Date.now(), - }) + '\n' - ) - ) - } - controller.close() - }, - }), - { headers: { 'Content-Type': 'application/x-ndjson' } } - ) -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/route.tsx b/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/route.tsx deleted file mode 100644 index aa0d97014..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/cmds/[cmdId]/route.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import { NextResponse, type NextRequest } from 'next/server' -import { Sandbox } from '@vercel/sandbox' - -interface Params { - sandboxId: string - cmdId: string -} - -export async function GET( - _request: NextRequest, - { params }: { params: Promise } -) { - const cmdParams = await params - const sandbox = await Sandbox.get(cmdParams) - const command = await sandbox.getCommand(cmdParams.cmdId) - - /** - * The wait can get to fail when the Sandbox is stopped but the command - * was still running. In such case we return empty for finish data. 
- */ - const done = await command.wait().catch(() => null) - return NextResponse.json({ - sandboxId: sandbox.sandboxId, - cmdId: command.cmdId, - startedAt: command.startedAt, - exitCode: done?.exitCode, - }) -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/files/route.ts b/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/files/route.ts deleted file mode 100644 index dfc4ac6be..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/files/route.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import { NextResponse, type NextRequest } from 'next/server'; -import z from 'zod/v3'; - -const FileParamsSchema = z.object({ - sandboxId: z.string(), - path: z.string(), -}); - -export async function GET( - request: NextRequest, - { params }: { params: Promise<{ sandboxId: string }> }, -) { - const { sandboxId } = await params; - const fileParams = FileParamsSchema.safeParse({ - path: request.nextUrl.searchParams.get('path'), - sandboxId, - }); - - if (fileParams.success === false) { - return NextResponse.json( - { error: 'Invalid parameters. You must pass a `path` as query' }, - { status: 400 }, - ); - } - - const sandbox = await Sandbox.get(fileParams.data); - const stream = await sandbox.readFile(fileParams.data); - if (!stream) { - return NextResponse.json({ error: 'File not found in the Sandbox' }, { status: 404 }); - } - - return new NextResponse( - new ReadableStream({ - async pull(controller) { - for await (const chunk of stream) { - controller.enqueue(chunk); - } - controller.close(); - }, - }), - ); -} - -export async function POST( - request: NextRequest, - { params }: { params: Promise<{ sandboxId: string }> }, -) { - const { sandboxId } = await params; - const body = await request.json(); - const schema = z.object({ path: z.string(), content: z.string() }); - const parsed = schema.safeParse(body); - if (!parsed.success) { - return NextResponse.json({ error: 'Invalid body' }, { status: 400 }); - } - const sandbox = await Sandbox.get({ sandboxId }); - await sandbox.writeFiles([ - { path: parsed.data.path, content: Buffer.from(parsed.data.content, 'utf8') }, - ]); - return NextResponse.json({ ok: true, path: parsed.data.path }); -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/route.tsx b/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/route.tsx deleted file mode 100644 index ea7a3c5a5..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/route.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import { APIError } from '@vercel/sandbox/dist/api-client/api-error' -import { NextRequest, NextResponse } from 'next/server' -import { Sandbox } from '@vercel/sandbox' - -/** - * We must change the SDK to add data to the instance and then - * use it to retrieve the status of the Sandbox. 
- */ -export async function GET( - _request: NextRequest, - { params }: { params: Promise<{ sandboxId: string }> } -) { - const { sandboxId } = await params - try { - const sandbox = await Sandbox.get({ sandboxId }) - await sandbox.runCommand({ - cmd: 'echo', - args: ['Sandbox status check'], - }) - return NextResponse.json({ status: 'running' }) - } catch (error) { - if ( - error instanceof APIError && - error.json.error.code === 'sandbox_stopped' - ) { - return NextResponse.json({ status: 'stopped' }) - } else { - throw error - } - } -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/run-task/route.ts b/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/run-task/route.ts deleted file mode 100644 index 4c11031f7..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/[sandboxId]/run-task/route.ts +++ /dev/null @@ -1,86 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import { NextRequest, NextResponse } from 'next/server'; - -const RUNNER_PATH = 'scripts/run-task.js'; -const EVENT_PATH = 'events/event.json'; - -function getRunnerContent() { - return ` -const taskPath = process.argv[2]; -const eventPath = process.argv[3]; -const fs = require('fs'); -const path = require('path'); - -// Mock getSecret function to match Lambda environment -async function getSecret(orgId, key) { - // For sandbox testing, just return a mock value - // In real Lambda, this would fetch from Secrets Manager - console.error('[getSecret] Mock implementation - returning null for', key); - return null; -} - -// Make getSecret and fetch available as globals like in Lambda -global.getSecret = getSecret; -global.fetch = global.fetch || require('node-fetch'); - -async function run() { - try { - // Resolve the path relative to the sandbox root - const resolvedPath = path.resolve(process.cwd(), taskPath); - - if (!fs.existsSync(resolvedPath)) { - throw new Error(\`Task file not found: \${resolvedPath}\`); - } - - const taskModule = require(resolvedPath); - const taskFunction = taskModule; // Should be the function directly - - if (typeof taskFunction !== 'function') { - throw new Error('Task module must export a function via module.exports = async (event) => { ... }'); - } - - const event = JSON.parse(fs.readFileSync(eventPath, 'utf8')); - const result = await taskFunction(event); - console.log(JSON.stringify({ ok: true, result })); - } catch (e) { - console.error(JSON.stringify({ ok: false, error: e?.message || 'Unknown error' })); - } -} - -run(); -`; -} - -export async function POST( - request: NextRequest, - { params }: { params: Promise<{ sandboxId: string }> }, -) { - const { sandboxId } = await params; - const body = await request.json(); - const taskPath: string = body?.path; - const event: unknown = body?.event ?? {}; - if (!taskPath) { - return NextResponse.json({ error: 'Missing path' }, { status: 400 }); - } - - const sandbox = await Sandbox.get({ sandboxId }); - - // Ensure runner and event file exist (idempotent) - await sandbox.writeFiles([ - { path: RUNNER_PATH, content: Buffer.from(getRunnerContent(), 'utf8') }, - { path: EVENT_PATH, content: Buffer.from(JSON.stringify(event ?? 
{}), 'utf8') }, - ]); - - const cmd = await sandbox.runCommand({ - detached: true, - cmd: 'node', - args: [RUNNER_PATH, taskPath, EVENT_PATH], - }); - - return NextResponse.json({ - sandboxId, - cmdId: cmd.cmdId, - command: 'node', - args: [RUNNER_PATH, taskPath, EVENT_PATH], - }); -} diff --git a/apps/app/src/app/api/tasks-automations/sandboxes/route.ts b/apps/app/src/app/api/tasks-automations/sandboxes/route.ts deleted file mode 100644 index 56c803de2..000000000 --- a/apps/app/src/app/api/tasks-automations/sandboxes/route.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { Sandbox } from '@vercel/sandbox'; -import { NextResponse } from 'next/server'; - -export async function POST() { - const sandbox = await Sandbox.create({ timeout: 600000 }); - return NextResponse.json({ sandboxId: sandbox.sandboxId }); -} diff --git a/apps/app/src/app/api/tasks-automations/trigger/execute/route.ts b/apps/app/src/app/api/tasks-automations/trigger/execute/route.ts deleted file mode 100644 index b4f4ff5b2..000000000 --- a/apps/app/src/app/api/tasks-automations/trigger/execute/route.ts +++ /dev/null @@ -1,49 +0,0 @@ -import { s3Client } from '@/app/s3'; -import { executeAutomationScript } from '@/jobs/tasks/automation/execute-script'; -import { GetObjectCommand } from '@aws-sdk/client-s3'; -import { NextResponse } from 'next/server'; - -export const runtime = 'nodejs'; - -export async function POST(req: Request) { - try { - const { orgId, taskId, sandboxId } = await req.json(); - - if (!orgId || !taskId) { - return NextResponse.json({ error: 'Missing required parameters' }, { status: 400 }); - } - - // Ensure the script exists in S3 before triggering the task - try { - const { Body } = await s3Client.send( - new GetObjectCommand({ - Bucket: process.env.TASKS_AUTOMATION_BUCKET, - Key: `${orgId}/${taskId}.automation.js`, - }), - ); - await Body!.transformToString(); - } catch (error) { - console.error('Failed to fetch script from S3:', error); - return NextResponse.json({ error: 'Script not found in S3' }, { status: 404 }); - } - - // Trigger the automation execution task - const handle = await executeAutomationScript.trigger({ - orgId, - taskId, - sandboxId, - }); - - return NextResponse.json({ - success: true, - runId: handle.id, - message: 'Automation task triggered successfully. 
Poll for updates using the run ID.', - }); - } catch (error) { - console.error('Error executing automation script:', error); - return NextResponse.json( - { error: 'Failed to execute automation script', details: (error as Error)?.message }, - { status: 500 }, - ); - } -} diff --git a/apps/app/src/app/api/tasks-automations/workflow/analyze/route.ts b/apps/app/src/app/api/tasks-automations/workflow/analyze/route.ts deleted file mode 100644 index 6fe5f9f65..000000000 --- a/apps/app/src/app/api/tasks-automations/workflow/analyze/route.ts +++ /dev/null @@ -1,162 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { generateObject } from 'ai'; -import { NextResponse } from 'next/server'; -import { z } from 'zod'; - -const WorkflowStepSchema = z.object({ - title: z.string().max(50).describe('A short, user-friendly title for this step (3-5 words max)'), - description: z - .string() - .max(200) - .describe('A clear explanation of what this step does in plain English'), - type: z - .enum(['trigger', 'action', 'condition', 'output']) - .describe( - 'The category of step: use "trigger" for start, "action" for operations like login/fetch/process, "condition" for if/else logic, "output" for final results', - ), - iconType: z - .enum(['start', 'fetch', 'login', 'check', 'process', 'filter', 'notify', 'complete', 'error']) - .describe('The icon type that best represents this step'), -}); - -const WorkflowAnalysisSchema = z.object({ - steps: z - .array(WorkflowStepSchema) - .max(5) - .describe('The workflow steps in order of execution (maximum 5 steps)'), -}); - -export async function POST(request: Request) { - try { - const { scriptContent } = await request.json(); - - if (!scriptContent) { - return NextResponse.json({ error: 'No script content provided' }, { status: 400 }); - } - - try { - const { object } = await generateObject({ - model: openai('gpt-4o-mini'), - schema: WorkflowAnalysisSchema, - prompt: `Analyze this Lambda function and break it down into simple, user-friendly workflow steps that a non-technical person can understand. - -Here's the script: -${scriptContent} - -CRITICAL: For the 'type' field, you MUST use one of these exact values: -- "trigger" - for the starting step -- "action" - for ANY operation (login, fetch data, process, compile, etc.) -- "condition" - for if/else or decision points -- "output" - for the final result/return step - -DO NOT use "process" as a type - use "action" instead. - -Guidelines: -1. Use simple, everyday language - avoid technical jargon -2. Focus on WHAT the automation does, not HOW it does it -3. Start with a trigger step (type: "trigger") -4. Include main actions like fetching data, logging in (type: "action") -5. Mark any if/else logic as (type: "condition") -6. End with what results are returned (type: "output") -7. Keep titles short (3-5 words) -8. Make descriptions clear and friendly -9. MAXIMUM 5 STEPS TOTAL - you MUST summarize and combine related actions -10. Group multiple similar operations into one step (e.g., "Process multiple items" instead of listing each) -11. 
Focus on the high-level flow, not implementation details - -Example steps with correct types: -{ - "title": "Start Automation", - "description": "Your automation begins running", - "type": "trigger", - "iconType": "start" -} -{ - "title": "Process Data", - "description": "Working with the information provided", - "type": "action", - "iconType": "process" -} -{ - "title": "Check Results", - "description": "Verifying if conditions are met", - "type": "condition", - "iconType": "check" -} -{ - "title": "Send Results", - "description": "Delivering the final information", - "type": "output", - "iconType": "complete" -}`, - }); - - return NextResponse.json({ steps: object.steps }); - } catch (aiError: any) { - console.error('AI generation error:', aiError); - - // Check if it's a validation error and try to extract what was generated - if (aiError.cause?.value?.steps) { - const rawSteps = aiError.cause.value.steps; - - // Fix invalid types - const fixedSteps = rawSteps.map((step: any, index: number) => { - let type = step.type; - - // Map invalid types to valid ones - if (!['trigger', 'action', 'condition', 'output'].includes(type)) { - if (type === 'process' || type === 'processing' || type === 'compile') { - type = 'action'; - } else if (type === 'check' || type === 'validation' || type === 'verify') { - type = 'condition'; - } else if (type === 'return' || type === 'result') { - type = 'output'; - } else if (index === 0) { - type = 'trigger'; - } else if (index === rawSteps.length - 1) { - type = 'output'; - } else { - type = 'action'; - } - } - - return { - title: (step.title || 'Step ' + (index + 1)).substring(0, 50), - description: (step.description || 'Processing...').substring(0, 200), - type, - iconType: step.iconType || 'process', - }; - }); - - return NextResponse.json({ steps: fixedSteps.slice(0, 5) }); - } - - // If we can't recover, return a generic workflow - return NextResponse.json({ - steps: [ - { - title: 'Start Automation', - description: 'The automation begins processing', - type: 'trigger', - iconType: 'start', - }, - { - title: 'Execute Logic', - description: 'Running the automation steps', - type: 'action', - iconType: 'process', - }, - { - title: 'Return Results', - description: 'Providing the final output', - type: 'output', - iconType: 'complete', - }, - ], - }); - } - } catch (error) { - console.error('Error analyzing workflow:', error); - return NextResponse.json({ error: 'Failed to analyze workflow' }, { status: 500 }); - } -} diff --git a/apps/app/src/components/header.tsx b/apps/app/src/components/header.tsx index 65e4d9185..5b938c832 100644 --- a/apps/app/src/components/header.tsx +++ b/apps/app/src/components/header.tsx @@ -4,6 +4,7 @@ import { Skeleton } from '@comp/ui/skeleton'; import { Suspense } from 'react'; import { AssistantButton } from './ai/chat-button'; import { MobileMenu } from './mobile-menu'; +import { NotificationBell } from './notifications/notification-bell'; export async function Header({ organizationId, @@ -20,7 +21,10 @@ export async function Header({ {!hideChat && } -
+
+ +
+
}> diff --git a/apps/app/src/components/notifications/notification-bell.tsx b/apps/app/src/components/notifications/notification-bell.tsx new file mode 100644 index 000000000..87cb72cc7 --- /dev/null +++ b/apps/app/src/components/notifications/notification-bell.tsx @@ -0,0 +1,98 @@ +'use client'; + +import { usePathname } from 'next/navigation'; +import { env } from '@/env.mjs'; +import { Inbox } from '@novu/nextjs'; +import { useSession } from '@/utils/auth-client'; +import { Bell, Settings } from 'lucide-react'; +import { useState, useRef, useEffect } from 'react'; + +export function NotificationBell() { + const applicationIdentifier = env.NEXT_PUBLIC_NOVU_APPLICATION_IDENTIFIER; + const { data: session } = useSession(); + const sessionData = session?.session; + const pathname = usePathname(); + const orgId = pathname?.split('/')[1] || null; + const [visible, setVisible] = useState(false); + const inboxRef = useRef(null); + + // Handle click outside to close inbox + useEffect(() => { + function handleClickOutside(event: MouseEvent) { + if (inboxRef.current && !inboxRef.current.contains(event.target as Node)) { + setVisible(false); + } + } + + if (visible) { + document.addEventListener('mousedown', handleClickOutside); + } + + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, [visible]); + + // Don't render if we don't have the required config + if (!applicationIdentifier || !sessionData?.userId || !orgId) { + return null; + } + + const appearance = { + icons: { + cogs: () => , + }, + elements: { + popoverContent: { + right: '8px', + left: 'auto !important', + marginTop: '8px', + width: '360px', + borderRadius: '8px', + }, + notification: { + paddingLeft: '24px', + }, + notificationDot: { + backgroundColor: 'hsl(var(--primary))', + }, + notificationImage: { + display: 'none', + }, + notificationBar: ({ notification }: { notification: any }) => { + return notification.isRead ? 'bg-transparent' : 'bg-primary'; + } + } + }; + + return ( +
+ ( + + )} + renderSubject={(notification) => {notification.subject}} + renderBody={(notification) => ( +
+

+ {notification.body} +

+
+ )} + onNotificationClick={() => setVisible(false)} + /> +
+ ); +} diff --git a/apps/app/src/env.mjs b/apps/app/src/env.mjs index 2793771a7..522fe452e 100644 --- a/apps/app/src/env.mjs +++ b/apps/app/src/env.mjs @@ -35,6 +35,7 @@ export const env = createEnv({ GA4_API_SECRET: z.string().optional(), GA4_MEASUREMENT_ID: z.string().optional(), LINKEDIN_CONVERSIONS_ACCESS_TOKEN: z.string().optional(), + NOVU_API_KEY: z.string().optional(), }, client: { @@ -47,6 +48,7 @@ export const env = createEnv({ NEXT_PUBLIC_GOOGLE_ADS_CONVERSION_LABEL: z.string().optional(), NEXT_PUBLIC_API_URL: z.string().optional(), NEXT_PUBLIC_BETTER_AUTH_URL: z.string().optional(), + NEXT_PUBLIC_NOVU_APPLICATION_IDENTIFIER: z.string().optional(), }, runtimeEnv: { @@ -91,6 +93,8 @@ export const env = createEnv({ NEXT_PUBLIC_GOOGLE_ADS_CONVERSION_LABEL: process.env.NEXT_PUBLIC_GOOGLE_ADS_CONVERSION_LABEL, NEXT_PUBLIC_API_URL: process.env.NEXT_PUBLIC_API_URL, NEXT_PUBLIC_BETTER_AUTH_URL: process.env.NEXT_PUBLIC_BETTER_AUTH_URL, + NOVU_API_KEY: process.env.NOVU_API_KEY, + NEXT_PUBLIC_NOVU_APPLICATION_IDENTIFIER: process.env.NEXT_PUBLIC_NOVU_APPLICATION_IDENTIFIER, }, skipValidation: !!process.env.CI || !!process.env.SKIP_ENV_VALIDATION, diff --git a/apps/app/src/jobs/tasks/automation/execute-script.ts b/apps/app/src/jobs/tasks/automation/execute-script.ts deleted file mode 100644 index a53e33413..000000000 --- a/apps/app/src/jobs/tasks/automation/execute-script.ts +++ /dev/null @@ -1,330 +0,0 @@ -import { getModelOptions } from '@/ai/gateway'; -import { decrypt, type EncryptedData } from '@/lib/encryption'; -import { GetObjectCommand, S3Client } from '@aws-sdk/client-s3'; -import { db } from '@db'; -import { logger, queue, task } from '@trigger.dev/sdk'; -import { generateObject } from 'ai'; -import axios from 'axios'; -import * as cheerio from 'cheerio'; -import { z } from 'zod'; - -// Queue for automation execution -const automationExecutionQueue = queue({ - name: 'automation-execution', - concurrencyLimit: 10, -}); - -interface ExecuteScriptPayload { - orgId: string; - taskId: string; - sandboxId?: string; -} - -interface ExecutionResult { - success: boolean; - output?: any; - error?: string; - logs: string[]; -} - -export const executeAutomationScript = task({ - id: 'execute-automation-script', - queue: automationExecutionQueue, - retry: { - maxAttempts: 3, - }, - run: async (payload: ExecuteScriptPayload): Promise => { - const { orgId, taskId } = payload; - const logs: string[] = []; - - if ( - !process.env.APP_AWS_REGION || - !process.env.APP_AWS_ACCESS_KEY_ID || - !process.env.APP_AWS_SECRET_ACCESS_KEY - ) { - throw new Error('AWS S3 credentials or configuration missing. 
Check environment variables.'); - } - - try { - logger.info(`Executing automation script for task ${taskId} in org ${orgId}`); - - // Fetch the script from S3 - const scriptKey = `${orgId}/${taskId}.automation.js`; - logs.push(`[SYSTEM] Fetching script from S3: ${scriptKey}`); - - const s3Client = new S3Client({ - region: process.env.APP_AWS_REGION, - credentials: { - accessKeyId: process.env.APP_AWS_ACCESS_KEY_ID, - secretAccessKey: process.env.APP_AWS_SECRET_ACCESS_KEY, - }, - }); - - const { Body } = await s3Client.send( - new GetObjectCommand({ - Bucket: process.env.TASKS_AUTOMATION_BUCKET, - Key: scriptKey, - }), - ); - - if (!Body) { - throw new Error('Script not found in S3'); - } - - const scriptContent = await Body.transformToString(); - logs.push(`[SYSTEM] Script loaded successfully (${scriptContent.length} bytes)`); - - // Fetch all available integrations for this org to provide context - // Get all secrets for this org - const secrets = await db.secret.findMany({ - where: { - organizationId: orgId, - }, - select: { - id: true, - name: true, - value: true, - }, - }); - - const availableSecrets = secrets.map((s) => s.name); - logs.push(`[SYSTEM] Available secrets: ${availableSecrets.join(', ') || 'none'}`); - - // Create the getSecret function (no integrationId parameter needed) - const getSecret = async ( - providedOrgId: string, - secretName: string, - ): Promise => { - try { - // Validate the org ID matches - if (providedOrgId !== orgId) { - logs.push( - `[SYSTEM] Warning: getSecret called with different orgId: ${providedOrgId} (expected: ${orgId})`, - ); - return null; - } - - logs.push(`[SYSTEM] Fetching secret '${secretName}'`); - - // Find the secret - const secret = secrets.find((s) => s.name === secretName); - - if (!secret) { - logs.push(`[SYSTEM] Secret '${secretName}' not found`); - return null; - } - - // Decrypt the secret value - const decryptedValue = await decrypt(JSON.parse(secret.value) as EncryptedData); - logs.push(`[SYSTEM] Secret '${secretName}' successfully retrieved`); - - // Update last used timestamp (fire and forget) - db.secret - .update({ - where: { id: secret.id }, - data: { lastUsedAt: new Date() }, - }) - .catch((err) => { - logger.warn('Failed to update secret last used timestamp', { - error: err, - secretId: secret.id, - }); - }); - - return decryptedValue; - } catch (error) { - const errorMsg = error instanceof Error ? error.message : String(error); - logs.push(`[SYSTEM] Error retrieving secret '${secretName}': ${errorMsg}`); - return null; - } - }; - - // Create a custom console that captures logs - const customConsole = { - log: (...args: any[]) => { - const message = args - .map((arg) => (typeof arg === 'object' ? JSON.stringify(arg, null, 2) : String(arg))) - .join(' '); - logs.push(`[LOG] ${message}`); - logger.info(message); - }, - error: (...args: any[]) => { - const message = args - .map((arg) => (typeof arg === 'object' ? JSON.stringify(arg, null, 2) : String(arg))) - .join(' '); - logs.push(`[ERROR] ${message}`); - logger.error(message); - }, - warn: (...args: any[]) => { - const message = args - .map((arg) => (typeof arg === 'object' ? JSON.stringify(arg, null, 2) : String(arg))) - .join(' '); - logs.push(`[WARN] ${message}`); - logger.warn(message); - }, - info: (...args: any[]) => { - const message = args - .map((arg) => (typeof arg === 'object' ? 
JSON.stringify(arg, null, 2) : String(arg))) - .join(' '); - logs.push(`[INFO] ${message}`); - logger.info(message); - }, - }; - - // Create a context with safe globals - const context = { - console: customConsole, - // Provide getSecret function - getSecret, - // Provide list of available secrets for reference - AVAILABLE_SECRETS: availableSecrets, - // Provide common utilities - axios, - cheerio, - fetch: globalThis.fetch, - Buffer, - URL, - URLSearchParams, - setTimeout, - clearTimeout, - Promise, - Date, - Math, - JSON, - Object, - Array, - String, - Number, - Boolean, - RegExp, - // Helpers for module exports - module: { exports: {} }, - exports: {}, - }; - - logs.push('[SYSTEM] Starting script execution...'); - - // Wrap the script in an async function to support top-level await - const wrappedScript = ` - return (async function() { - ${scriptContent} - return module.exports; - })() - `; - - // Create a function with the context - const AsyncFunction = Object.getPrototypeOf(async function () {}).constructor; - const executeScript = new AsyncFunction(...Object.keys(context), wrappedScript); - - // Execute the script with timeout - const timeoutPromise = new Promise((_, reject) => { - setTimeout(() => reject(new Error('Script execution timed out after 5 minutes')), 300000); - }); - - const scriptPromise = executeScript(...Object.values(context)); - const scriptModule = await Promise.race([scriptPromise, timeoutPromise]); - - // Handle different script formats - let result; - - // Log what we're about to execute - logs.push(`[SYSTEM] Script module type: ${typeof scriptModule}`); - - if (typeof scriptModule === 'function') { - // If the script exports a function, call it - const eventObject = { orgId, taskId }; - logs.push( - `[SYSTEM] Calling exported function with event object: ${JSON.stringify(eventObject)}`, - ); - try { - result = await scriptModule(eventObject); - logs.push(`[SYSTEM] Function execution completed, result type: ${typeof result}`); - } catch (funcError) { - const errorMessage = funcError instanceof Error ? funcError.message : String(funcError); - logs.push(`[SYSTEM] Function execution error: ${errorMessage}`); - throw funcError; - } - } else if (scriptModule && typeof scriptModule.run === 'function') { - // If the script exports an object with a run method - logs.push('[SYSTEM] Calling run method with event object'); - result = await scriptModule.run({ orgId, taskId }); - } else if (scriptModule && typeof scriptModule.default === 'function') { - // If the script has a default export that's a function - logs.push('[SYSTEM] Calling default export function with event object'); - result = await scriptModule.default({ orgId, taskId }); - } else { - // Otherwise, assume the script ran its logic and return the module - logs.push('[SYSTEM] Returning module as-is'); - result = scriptModule; - } - - logs.push(`[SYSTEM] Function returned: ${JSON.stringify(result)}`); - - logs.push('[SYSTEM] Script execution completed successfully'); - - // Log the output for debugging - console.log(`[Automation Execution] Script output for ${orgId}/${taskId}:`, result); - - // Create a friendly summary using AI (structured) - let summary: string | undefined; - try { - const { object } = await generateObject({ - ...getModelOptions('gpt-4o-mini'), - system: - 'You are a helpful assistant that summarizes automation test results. Focus only on describing what happened or what was found. Do not provide advice, suggestions, or commentary. Be factual and concise. 
1-2 short sentences.', - prompt: `Summarize what this automation discovered or accomplished. Focus only on the outcome, not advice.\nRESULT:\n${JSON.stringify( - result, - )}\n\nRECENT_LOGS:\n${logs.slice(-20).join('\n')}`, - schema: z.object({ summary: z.string().min(1) }), - }); - summary = object.summary; - } catch {} - - return { - success: true, - output: result, - logs, - // @ts-expect-error propagate summary to API mapper - summary, - }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - const errorStack = error instanceof Error ? error.stack : undefined; - - logs.push(`[SYSTEM] Script execution failed: ${errorMessage}`); - if (errorStack) { - logs.push(`[SYSTEM] Stack trace: ${errorStack}`); - } - - logger.error('Automation script execution failed', { - error: errorMessage, - stack: errorStack, - orgId, - taskId, - }); - - // Friendly error summary (structured) - let summary: string | undefined; - try { - const { object } = await generateObject({ - ...getModelOptions('gpt-4o-mini'), - system: - 'You are a helpful assistant that explains an automation test failure to an end-user in a friendly, concise way. Avoid technical jargon. 1-2 short sentences.', - prompt: `Summarize this failure for an end user.\nERROR:\n${errorMessage}\n\nRECENT_LOGS:\n${logs - .slice(-20) - .join('\n')}`, - schema: z.object({ summary: z.string().min(1) }), - }); - summary = object.summary; - } catch {} - - return { - success: false, - error: errorMessage, - logs, - // @ts-expect-error propagate summary to API mapper - summary, - }; - } - }, -}); diff --git a/apps/app/src/jobs/tasks/integration/run-integration-tests.ts b/apps/app/src/jobs/tasks/integration/run-integration-tests.ts new file mode 100644 index 000000000..705eb4798 --- /dev/null +++ b/apps/app/src/jobs/tasks/integration/run-integration-tests.ts @@ -0,0 +1,80 @@ +import { db } from '@db'; +import { logger, task } from '@trigger.dev/sdk'; +import { sendIntegrationResults } from './integration-results'; + +export const runIntegrationTests = task({ + id: 'run-integration-tests', + run: async (payload: { organizationId: string }) => { + const { organizationId } = payload; + + logger.info(`Running integration tests for organization: ${organizationId}`); + + const integrations = await db.integration.findMany({ + where: { + organizationId: organizationId, + integrationId: { + in: ['aws', 'gcp', 'azure'], + }, + }, + select: { + id: true, + name: true, + integrationId: true, + settings: true, + userSettings: true, + organization: { + select: { + id: true, + name: true, + }, + }, + }, + }); + + if (!integrations || integrations.length === 0) { + logger.warn(`No integrations found for organization: ${organizationId}`); + return { + success: false, + error: 'No integrations found', + organizationId, + }; + } + + logger.info(`Found ${integrations.length} integrations to test for organization: ${organizationId}`); + + const batchItems = integrations.map((integration) => ({ + payload: { + integration: { + id: integration.id, + name: integration.name, + integration_id: integration.integrationId, + settings: integration.settings, + user_settings: integration.userSettings, + organization: integration.organization, + }, + }, + })); + + try { + const batchHandle = await sendIntegrationResults.batchTriggerAndWait(batchItems); + + logger.info(`Successfully completed batch integration tests for organization: ${organizationId}`); + + return { + success: true, + organizationId, + integrationsCount: integrations.length, + 
batchHandleId: batchHandle.id, + }; + } catch (error) { + logger.error(`Failed to run integration tests for organization ${organizationId}: ${error}`); + + return { + success: false, + error: error instanceof Error ? error.message : String(error), + organizationId, + integrationsCount: integrations.length, + }; + } + }, +}); diff --git a/apps/app/src/jobs/tasks/task/policy-schedule.ts b/apps/app/src/jobs/tasks/task/policy-schedule.ts index 32fed28a0..c5567f6d8 100644 --- a/apps/app/src/jobs/tasks/task/policy-schedule.ts +++ b/apps/app/src/jobs/tasks/task/policy-schedule.ts @@ -1,5 +1,6 @@ +import { env } from '@/env.mjs'; import { db } from '@db'; -import { sendPolicyReviewNotificationEmail } from '@trycompai/email'; +import { Novu } from '@novu/api'; import { logger, schedules } from '@trigger.dev/sdk'; export const policySchedule = schedules.task({ @@ -9,6 +10,10 @@ export const policySchedule = schedules.task({ run: async () => { const now = new Date(); + const novu = new Novu({ + secretKey: process.env.NOVU_API_KEY + }); + // Find all published policies that have a review date and frequency set const candidatePolicies = await db.policy.findMany({ where: { @@ -23,12 +28,33 @@ export const policySchedule = schedules.task({ include: { organization: { select: { + id: true, name: true, + members: { + where: { + role: { contains: 'owner' } + }, + select: { + user: { + select: { + id: true, + name: true, + email: true, + }, + }, + }, + }, }, }, assignee: { - include: { - user: true, + select: { + user: { + select: { + id: true, + name: true, + email: true, + }, + }, }, }, }, @@ -96,101 +122,74 @@ export const policySchedule = schedules.task({ }, }); - // Log details about updated policies - overduePolicies.forEach((policy) => { - logger.info( - `Updated policy "${policy.name}" (${policy.id}) from org "${policy.organization.name}" - frequency ${policy.frequency} - last reviewed ${policy.reviewDate?.toISOString()}`, - ); - }); - - logger.info(`Successfully updated ${updateResult.count} policies to "needs_review" status`); - - // Build a map of owners by organization for targeted notifications - const uniqueOrgIds = Array.from(new Set(overduePolicies.map((p) => p.organizationId))); - const owners = await db.member.findMany({ - where: { - organizationId: { in: uniqueOrgIds }, - isActive: true, - // role is a comma-separated string sometimes - role: { contains: 'owner' }, - }, - include: { - user: true, - }, - }); - - const ownersByOrgId = new Map(); - owners.forEach((owner) => { - const email = owner.user?.email; - if (!email) return; - const list = ownersByOrgId.get(owner.organizationId) ?? []; - list.push({ email, name: owner.user.name ?? 
email }); - ownersByOrgId.set(owner.organizationId, list); - }); - - // Send review notifications to org owners and the policy assignee only - // Send review notifications to org owners and the policy assignee only, rate-limited to 2 emails/sec - const EMAIL_BATCH_SIZE = 2; - const EMAIL_BATCH_DELAY_MS = 1000; - - // Build a flat list of all emails to send, with their policy context - type EmailJob = { + // Build array of recipients (org owner(s) and policy assignee(s)) for each overdue policy + const recipientsMap = new Map(); + const addRecipients = ( + users: Array<{ user: { id: string; email: string; name?: string } }>, + policy: typeof overduePolicies[number], + ) => { + for (const entry of users) { + const user = entry.user; + if (user && user.email && user.id) { + const key = `${user.id}-${policy.id}`; + if (!recipientsMap.has(key)) { + recipientsMap.set(key, { + email: user.email, + userId: user.id, + name: user.name ?? '', + policy, + }); + } + } + } }; - const emailJobs: EmailJob[] = []; + // trigger notification for each policy for (const policy of overduePolicies) { - const recipients = new Map(); // email -> name - - // Assignee (if any) - const assigneeEmail = policy.assignee?.user?.email; - if (assigneeEmail) { - recipients.set(assigneeEmail, policy.assignee?.user?.name ?? assigneeEmail); - } - - // Organization owners - const orgOwners = ownersByOrgId.get(policy.organizationId) ?? []; - orgOwners.forEach((o) => recipients.set(o.email, o.name)); - - if (recipients.size === 0) { - logger.info(`No recipients found for policy ${policy.id} (${policy.name})`); - continue; + // Org owners + if (policy.organization && Array.isArray(policy.organization.members)) { + addRecipients(policy.organization.members, policy); } - - for (const [email, name] of recipients.entries()) { - emailJobs.push({ email, name, policy }); + // Policy assignee + if (policy.assignee) { + addRecipients([policy.assignee], policy); } } - // Send emails in batches of EMAIL_BATCH_SIZE per second - for (let i = 0; i < emailJobs.length; i += EMAIL_BATCH_SIZE) { - const batch = emailJobs.slice(i, i + EMAIL_BATCH_SIZE); - - await Promise.all( - batch.map(async ({ email, name, policy }) => { - try { - await sendPolicyReviewNotificationEmail({ - email, - userName: name, - policyName: policy.name, - organizationName: policy.organization.name, - organizationId: policy.organizationId, - policyId: policy.id, - }); - logger.info(`Sent policy review notification to ${email} for policy ${policy.id}`); - } catch (emailError) { - logger.error(`Failed to send review email to ${email} for policy ${policy.id}: ${emailError}`); - } - }), + // Final deduplicated recipients array + const recipients = Array.from(recipientsMap.values()); + novu.triggerBulk({ + events: recipients.map((recipient) => ({ + workflowId: 'policy-review-required', + to: { + subscriberId: `${recipient.userId}-${recipient.policy.organizationId}`, + email: recipient.email, + }, + payload: { + email: recipient.email, + userName: recipient.name, + policyName: recipient.policy.name, + organizationName: recipient.policy.organization.name, + organizationId: recipient.policy.organizationId, + policyId: recipient.policy.id, + policyUrl: `${process.env.NEXT_PUBLIC_APP_URL ?? 
'https://app.trycomp.ai'}/${recipient.policy.organizationId}/policies/${recipient.policy.id}`, + } })), + }); + + // Log details about updated policies + overduePolicies.forEach((policy) => { + logger.info( + `Updated policy "${policy.name}" (${policy.id}) from org "${policy.organization.name}" - frequency ${policy.frequency} - last reviewed ${policy.reviewDate?.toISOString()}`, ); + }); - // Only delay if there are more emails to send - if (i + EMAIL_BATCH_SIZE < emailJobs.length) { - await new Promise((resolve) => setTimeout(resolve, EMAIL_BATCH_DELAY_MS)); - } - } + logger.info(`Successfully updated ${updateResult.count} policies to "needs_review" status`); return { success: true, diff --git a/apps/app/src/jobs/tasks/task/task-schedule.ts b/apps/app/src/jobs/tasks/task/task-schedule.ts index 303d781ed..bf43d4ded 100644 --- a/apps/app/src/jobs/tasks/task/task-schedule.ts +++ b/apps/app/src/jobs/tasks/task/task-schedule.ts @@ -1,5 +1,5 @@ import { db } from '@db'; -import { sendTaskReviewNotificationEmail } from '@trycompai/email'; +import { Novu } from '@novu/api'; import { logger, schedules } from '@trigger.dev/sdk'; export const taskSchedule = schedules.task({ @@ -8,6 +8,9 @@ export const taskSchedule = schedules.task({ maxDuration: 1000 * 60 * 10, // 10 minutes run: async () => { const now = new Date(); + const novu = new Novu({ + secretKey: process.env.NOVU_API_KEY + }); // Find all Done tasks that have a review date and frequency set const candidateTasks = await db.task.findMany({ @@ -23,18 +26,39 @@ export const taskSchedule = schedules.task({ include: { organization: { select: { + id: true, name: true, + members: { + where: { + role: { contains: 'owner' } + }, + select: { + user: { + select: { + id: true, + name: true, + email: true, + }, + }, + }, + }, }, }, assignee: { - include: { - user: true, + select: { + user: { + select: { + id: true, + name: true, + email: true, + }, + }, }, }, }, }); - // Helpers to compute next due date based on frequency + // Find all tasks past their review deadline. const addDaysToDate = (date: Date, days: number) => { const result = new Date(date.getTime()); result.setDate(result.getDate() + days); @@ -90,8 +114,8 @@ export const taskSchedule = schedules.task({ }; } - // Update all overdue tasks to "todo" status try { + // Update all overdue tasks to "todo" status const taskIds = overdueTasks.map((task) => task.id); const updateResult = await db.task.updateMany({ @@ -105,119 +129,74 @@ export const taskSchedule = schedules.task({ }, }); - - - // Log details about updated tasks - overdueTasks.forEach((task) => { - logger.info( - `Updated task "${task.title}" (${task.id}) from org "${task.organization.name}" - frequency ${task.frequency} - last reviewed ${task.reviewDate?.toISOString()}`, - ); - }); - - logger.info(`Successfully updated ${updateResult.count} tasks to "todo" status`); - - // Build a map of admins by organization for targeted notifications - const uniqueOrgIds = Array.from(new Set(overdueTasks.map((t) => t.organizationId))); - const admins = await db.member.findMany({ - where: { - organizationId: { in: uniqueOrgIds }, - isActive: true, - // role is a comma-separated string sometimes - role: { contains: 'admin' }, - }, - include: { - user: true, - }, - }); - - const adminsByOrgId = new Map(); - admins.forEach((admin) => { - const email = admin.user?.email; - if (!email) return; - const list = adminsByOrgId.get(admin.organizationId) ?? []; - list.push({ email, name: admin.user.name ?? 
email }); -      adminsByOrgId.set(admin.organizationId, list); -    }); - -    // Rate limit: 2 emails per second -    const EMAIL_BATCH_SIZE = 2; -    const EMAIL_BATCH_DELAY_MS = 1000; - -    // Build a flat list of email jobs -    type EmailJob = { +      const recipientsMap = new Map<string, {
-        switch (frequency) {
-          case 'daily':
-            return addDaysToDate(reviewDate, 1);
-          case 'weekly':
-            return addDaysToDate(reviewDate, 7);
-          case 'monthly':
-            return addMonthsToDate(reviewDate, 1);
-          case 'quarterly':
-            return addMonthsToDate(reviewDate, 3);
-          case 'yearly':
-            return addMonthsToDate(reviewDate, 12);
-          default:
-            return null;
+        email: string;
+        userId: string;
+        name: string;
+        task: typeof overdueTasks[number];
+      }>();
+      const addRecipients = ( + users: Array<{ user: { id: string; email: string; name?: string } }>, + task: typeof overdueTasks[number], + ) => { + for (const entry of users) { + const user = entry.user; + if (user && user.email && user.id) { + const key = `${user.id}-${task.id}`; + if (!recipientsMap.has(key)) { + recipientsMap.set(key, { + email: user.email, + userId: user.id, + name: user.name ?? '', + task, + }); + } + } } }; + // Find recipients (org owner and assignee) for each task and add to recipientsMap for (const task of overdueTasks) { - const recipients = new Map(); // email -> name - - // Assignee (if any) - const assigneeEmail = task.assignee?.user?.email; - if (assigneeEmail) { - recipients.set(assigneeEmail, task.assignee?.user?.name ?? assigneeEmail); - } - - // Organization admins - const orgAdmins = adminsByOrgId.get(task.organizationId) ?? []; - orgAdmins.forEach((a) => recipients.set(a.email, a.name)); - - if (recipients.size === 0) { - logger.info(`No recipients found for task ${task.id} (${task.title})`); - continue; + // Org owners + if (task.organization && Array.isArray(task.organization.members)) { + addRecipients(task.organization.members, task); } - - for (const [email, name] of recipients.entries()) { - emailJobs.push({ email, name, task }); + // Task assignee + if (task.assignee) { + addRecipients([task.assignee], task); } } - for (let i = 0; i < emailJobs.length; i += EMAIL_BATCH_SIZE) { - const batch = emailJobs.slice(i, i + EMAIL_BATCH_SIZE); - - await Promise.all( - batch.map(async ({ email, name, task }) => { - try { - await sendTaskReviewNotificationEmail({ - email, - userName: name, - taskName: task.title, - organizationName: task.organization.name, - organizationId: task.organizationId, - taskId: task.id, - }); - logger.info(`Sent task review notification to ${email} for task ${task.id}`); - } catch (emailError) { - logger.error(`Failed to send review email to ${email} for task ${task.id}: ${emailError}`); - } - }), + // Final deduplicated recipients array. + const recipients = Array.from(recipientsMap.values()); + // Trigger notification for each recipient. + novu.triggerBulk({ + events: recipients.map((recipient) => ({ + workflowId: 'task-review-required', + to: { + subscriberId: `${recipient.userId}-${recipient.task.organizationId}`, + email: recipient.email, + }, + payload: { + email: recipient.email, + userName: recipient.name, + taskName: recipient.task.title, + organizationName: recipient.task.organization.name, + organizationId: recipient.task.organizationId, + taskId: recipient.task.id, + taskUrl: `${process.env.NEXT_PUBLIC_APP_URL ?? 
'https://app.trycomp.ai'}/${recipient.task.organizationId}/tasks/${recipient.task.id}`, + } + })), + }); + + // Log details about updated tasks + overdueTasks.forEach((task) => { + logger.info( + `Updated task "${task.title}" (${task.id}) from org "${task.organization.name}" - frequency ${task.frequency} - last reviewed ${task.reviewDate?.toISOString()}`, ); + }); - // Only delay if there are more emails to send - if (i + EMAIL_BATCH_SIZE < emailJobs.length) { - await new Promise((resolve) => setTimeout(resolve, EMAIL_BATCH_DELAY_MS)); - } - } + logger.info(`Successfully updated ${updateResult.count} tasks to "todo" status`); return { success: true, diff --git a/apps/app/src/styles/globals.css b/apps/app/src/styles/globals.css index 29fc6d56c..7052c7f42 100644 --- a/apps/app/src/styles/globals.css +++ b/apps/app/src/styles/globals.css @@ -50,3 +50,57 @@ body { background-color: hsl(var(--primary)); color: white; } + +/* Custom thin scrollbar for chat */ +.chat-scrollbar::-webkit-scrollbar { + width: 3px; +} + +.chat-scrollbar::-webkit-scrollbar-track { + background: transparent; +} + +.chat-scrollbar::-webkit-scrollbar-thumb { + background: hsl(var(--muted-foreground) / 0.15); + border-radius: 2px; +} + +.chat-scrollbar::-webkit-scrollbar-thumb:hover { + background: hsl(var(--muted-foreground) / 0.25); +} + +/* Firefox scrollbar */ +.chat-scrollbar { + scrollbar-width: thin; + scrollbar-color: hsl(var(--muted-foreground) / 0.15) transparent; +} + +/* Workflow card animations */ +@keyframes reveal-step { + from { + opacity: 0; + transform: translateY(16px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes zoom-icon { + from { + transform: scale(0); + } + to { + transform: scale(1); + } +} + +@keyframes expand-height { + from { + max-height: 0; + } + to { + max-height: var(--final-height); + } +} diff --git a/apps/portal/src/app/actions/login.ts b/apps/portal/src/app/actions/login.ts index 6b2d9778d..2a26187d2 100644 --- a/apps/portal/src/app/actions/login.ts +++ b/apps/portal/src/app/actions/login.ts @@ -4,7 +4,41 @@ import { auth } from '@/app/lib/auth'; import { createSafeActionClient } from 'next-safe-action'; import { z } from 'zod'; -export const login = createSafeActionClient() +const handleServerError = (e: Error) => { + if (e instanceof Error) { + // Check for common OTP-related error messages + const errorMessage = e.message.toLowerCase(); + console.error('Error message (lowercase):', errorMessage); + + if (errorMessage.includes('invalid') && errorMessage.includes('otp')) { + return 'Invalid OTP code. Please check your code and try again.'; + } + + if (errorMessage.includes('expired') && errorMessage.includes('otp')) { + return 'OTP code has expired. Please request a new code.'; + } + + if (errorMessage.includes('not found') || errorMessage.includes('user not found')) { + return 'No account found with this email address.'; + } + + // For other authentication errors, provide a more specific message + if (errorMessage.includes('unauthorized') || errorMessage.includes('authentication')) { + return 'Authentication failed. Please try again.'; + } + + if (errorMessage.includes('too many attempts')) { + return 'Too many requests. Please try again later.'; + } + + // If we can't match a specific error, throw a generic but helpful message + return 'Login failed. 
Please check your OTP code and try again.'; + } + + return 'Something went wrong while executing the operation'; +}; + +export const login = createSafeActionClient({ handleServerError }) .inputSchema( z.object({ otp: z.string(), diff --git a/apps/portal/src/app/lib/auth.ts b/apps/portal/src/app/lib/auth.ts index b7eadfcb3..45b8fc073 100644 --- a/apps/portal/src/app/lib/auth.ts +++ b/apps/portal/src/app/lib/auth.ts @@ -5,6 +5,7 @@ import { prismaAdapter } from 'better-auth/adapters/prisma'; import { nextCookies } from 'better-auth/next-js'; import { emailOTP, multiSession, organization } from 'better-auth/plugins'; import { ac, admin, auditor, employee, owner } from './permissions'; +import { env } from '@/env.mjs'; export const auth = betterAuth({ database: prismaAdapter(db, { @@ -16,7 +17,7 @@ export const auth = betterAuth({ generateId: false, }, trustedOrigins: ['http://localhost:3000', 'https://*.trycomp.ai'], - secret: process.env.AUTH_SECRET!, + secret: env.AUTH_SECRET!, plugins: [ organization({ membershipLimit: 100000000000, diff --git a/apps/portal/src/env.mjs b/apps/portal/src/env.mjs index 07eed2712..2f7086a70 100644 --- a/apps/portal/src/env.mjs +++ b/apps/portal/src/env.mjs @@ -10,6 +10,7 @@ export const env = createEnv({ UPSTASH_REDIS_REST_TOKEN: z.string().optional(), AUTH_GOOGLE_ID: z.string(), AUTH_GOOGLE_SECRET: z.string(), + AUTH_SECRET: z.string(), }, client: { @@ -29,6 +30,7 @@ export const env = createEnv({ UPSTASH_REDIS_REST_TOKEN: process.env.UPSTASH_REDIS_REST_TOKEN, AUTH_GOOGLE_ID: process.env.AUTH_GOOGLE_ID, AUTH_GOOGLE_SECRET: process.env.AUTH_GOOGLE_SECRET, + AUTH_SECRET: process.env.AUTH_SECRET, }, skipValidation: !!process.env.CI || !!process.env.SKIP_ENV_VALIDATION, diff --git a/bun.lock b/bun.lock index 6fc52fd2a..ba3f7bf35 100644 --- a/bun.lock +++ b/bun.lock @@ -142,6 +142,8 @@ "@monaco-editor/react": "^4.7.0", "@nangohq/frontend": "^0.53.2", "@next/third-parties": "^15.3.1", + "@novu/api": "^1.6.0", + "@novu/nextjs": "^3.10.1", "@number-flow/react": "^0.5.9", "@prisma/client": "^6.13.0", "@prisma/instrumentation": "6.6.0", @@ -750,6 +752,8 @@ "@comp/portal": ["@comp/portal@workspace:apps/portal"], + "@corvu/utils": ["@corvu/utils@0.4.2", "", { "dependencies": { "@floating-ui/dom": "^1.6.11" }, "peerDependencies": { "solid-js": "^1.8" } }, "sha512-Ox2kYyxy7NoXdKWdHeDEjZxClwzO4SKM8plAaVwmAJPxHMqA0rLOoAsa+hBDwRLpctf+ZRnAd/ykguuJidnaTA=="], + "@cspotcode/source-map-support": ["@cspotcode/source-map-support@0.8.1", "", { "dependencies": { "@jridgewell/trace-mapping": "0.3.9" } }, "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw=="], "@csstools/color-helpers": ["@csstools/color-helpers@5.1.0", "", {}, "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA=="], @@ -976,6 +980,10 @@ "@inquirer/type": ["@inquirer/type@3.0.8", "", { "peerDependencies": { "@types/node": ">=18" }, "optionalPeers": ["@types/node"] }, "sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw=="], + "@internationalized/date": ["@internationalized/date@3.10.0", "", { "dependencies": { "@swc/helpers": "^0.5.0" } }, "sha512-oxDR/NTEJ1k+UFVQElaNIk65E/Z83HK1z1WI3lQyhTtnNg4R5oVXaPzK3jcpKG8UHKDVuDQHzn+wsxSz8RP3aw=="], + + "@internationalized/number": ["@internationalized/number@3.6.5", "", { "dependencies": { "@swc/helpers": "^0.5.0" } }, "sha512-6hY4Kl4HPBvtfS62asS/R22JzNNy8vi/Ssev7x6EobfCp+9QIB2hKvI2EtbdJ0VSQacxVNtqhE/NmF/NZ0gm6g=="], + "@isaacs/balanced-match": 
["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], @@ -1038,6 +1046,10 @@ "@jsonhero/path": ["@jsonhero/path@1.0.21", "", {}, "sha512-gVUDj/92acpVoJwsVJ/RuWOaHyG4oFzn898WNGQItLCTQ+hOaVlEaImhwE1WqOTf+l3dGOUkbSiVKlb3q1hd1Q=="], + "@kobalte/core": ["@kobalte/core@0.13.11", "", { "dependencies": { "@floating-ui/dom": "^1.5.1", "@internationalized/date": "^3.4.0", "@internationalized/number": "^3.2.1", "@kobalte/utils": "^0.9.1", "@solid-primitives/props": "^3.1.8", "@solid-primitives/resize-observer": "^2.0.26", "solid-presence": "^0.1.8", "solid-prevent-scroll": "^0.1.4" }, "peerDependencies": { "solid-js": "^1.8.15" } }, "sha512-hK7TYpdib/XDb/r/4XDBFaO9O+3ZHz4ZWryV4/3BfES+tSQVgg2IJupDnztKXB0BqbSRy/aWlHKw1SPtNPYCFQ=="], + + "@kobalte/utils": ["@kobalte/utils@0.9.1", "", { "dependencies": { "@solid-primitives/event-listener": "^2.2.14", "@solid-primitives/keyed": "^1.2.0", "@solid-primitives/map": "^0.4.7", "@solid-primitives/media": "^2.2.4", "@solid-primitives/props": "^3.1.8", "@solid-primitives/refs": "^1.0.5", "@solid-primitives/utils": "^6.2.1" }, "peerDependencies": { "solid-js": "^1.8.8" } }, "sha512-eeU60A3kprIiBDAfv9gUJX1tXGLuZiKMajUfSQURAF2pk4ZoMYiqIzmrMBvzcxP39xnYttgTyQEVLwiTZnrV4w=="], + "@levischuck/tiny-cbor": ["@levischuck/tiny-cbor@0.2.11", "", {}, "sha512-llBRm4dT4Z89aRsm6u2oEZ8tfwL/2l6BwpZ7JcyieouniDECM5AqNgr/y08zalEIvW3RSK4upYyybDcmjXqAow=="], "@lukeed/csprng": ["@lukeed/csprng@1.1.0", "", {}, "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA=="], @@ -1054,6 +1066,18 @@ "@monogrid/gainmap-js": ["@monogrid/gainmap-js@3.1.0", "", { "dependencies": { "promise-worker-transferable": "^1.0.4" }, "peerDependencies": { "three": ">= 0.159.0" } }, "sha512-Obb0/gEd/HReTlg8ttaYk+0m62gQJmCblMOjHSMHRrBP2zdfKMHLCRbh/6ex9fSUJMKdjjIEiohwkbGD3wj2Nw=="], + "@motionone/animation": ["@motionone/animation@10.18.0", "", { "dependencies": { "@motionone/easing": "^10.18.0", "@motionone/types": "^10.17.1", "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, "sha512-9z2p5GFGCm0gBsZbi8rVMOAJCtw1WqBTIPw3ozk06gDvZInBPIsQcHgYogEJ4yuHJ+akuW8g1SEIOpTOvYs8hw=="], + + "@motionone/dom": ["@motionone/dom@10.18.0", "", { "dependencies": { "@motionone/animation": "^10.18.0", "@motionone/generators": "^10.18.0", "@motionone/types": "^10.17.1", "@motionone/utils": "^10.18.0", "hey-listen": "^1.0.8", "tslib": "^2.3.1" } }, "sha512-bKLP7E0eyO4B2UaHBBN55tnppwRnaE3KFfh3Ps9HhnAkar3Cb69kUCJY9as8LrccVYKgHA+JY5dOQqJLOPhF5A=="], + + "@motionone/easing": ["@motionone/easing@10.18.0", "", { "dependencies": { "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, "sha512-VcjByo7XpdLS4o9T8t99JtgxkdMcNWD3yHU/n6CLEz3bkmKDRZyYQ/wmSf6daum8ZXqfUAgFeCZSpJZIMxaCzg=="], + + "@motionone/generators": ["@motionone/generators@10.18.0", "", { "dependencies": { "@motionone/types": "^10.17.1", "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, "sha512-+qfkC2DtkDj4tHPu+AFKVfR/C30O1vYdvsGYaR13W/1cczPrrcjdvYCj0VLFuRMN+lP1xvpNZHCRNM4fBzn1jg=="], + + "@motionone/types": ["@motionone/types@10.17.1", "", {}, "sha512-KaC4kgiODDz8hswCrS0btrVrzyU2CSQKO7Ps90ibBVSQmjkrt2teqta6/sOG59v7+dPnKMAg13jyqtMKV2yJ7A=="], + + "@motionone/utils": ["@motionone/utils@10.18.0", "", { "dependencies": { 
"@motionone/types": "^10.17.1", "hey-listen": "^1.0.8", "tslib": "^2.3.1" } }, "sha512-3XVF7sgyTSI2KWvTf6uLlBJ5iAgRgmvp3bpuOiQJvInd4nZ19ET8lX5unn30SlmRH7hXbBbH+Gxd0m0klJ3Xtw=="], + "@msgpackr-extract/msgpackr-extract-darwin-arm64": ["@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw=="], "@msgpackr-extract/msgpackr-extract-darwin-x64": ["@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw=="], @@ -1124,6 +1148,14 @@ "@nolyfill/is-core-module": ["@nolyfill/is-core-module@1.0.39", "", {}, "sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA=="], + "@novu/api": ["@novu/api@1.7.0", "", { "dependencies": { "zod": "^3.20.0" }, "peerDependencies": { "@modelcontextprotocol/sdk": ">=1.5.0 <1.10.0" }, "optionalPeers": ["@modelcontextprotocol/sdk"], "bin": { "mcp": "bin/mcp-server.js" } }, "sha512-MujRrk/UrTFAMlwsZZFwRdgupqARJKwYj0sINmcYAW0g8ets+vSewXEQMsaynGwDiFh8C4oDQlhmxWc+pT3NWA=="], + + "@novu/js": ["@novu/js@3.10.1", "", { "dependencies": { "@floating-ui/dom": "^1.6.13", "@kobalte/core": "^0.13.10", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", "event-target-polyfill": "^0.0.4", "mitt": "^3.0.1", "partysocket": "^1.1.4", "socket.io-client": "4.7.2", "solid-floating-ui": "^0.3.1", "solid-js": "^1.9.4", "solid-motionone": "^1.0.3", "tailwind-merge": "^2.4.0" } }, "sha512-ht8bwg5vPkmoNxHDbTqBMaAuNzO4bqhVpwxzbWLYUdauj2cPd3taI9yuvXYwln9C6utNe6O1UiEJECdJM8fafg=="], + + "@novu/nextjs": ["@novu/nextjs@3.10.1", "", { "dependencies": { "@novu/react": "3.10.1" }, "peerDependencies": { "next": ">=13.5.2 || ^14.0.0 || ^15.0.0", "react": "^18.0.0 || ^19.0.0 || ^19.0.0-0", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-0" }, "optionalPeers": ["react-dom"] }, "sha512-I5OSRMs4pYBJpOhGjUSsG0yEobY9hwWGyHcK8l7PP8y8eU8FAjVSDpPbb4hDWf5GZ0Y1biT/qfLL4MFBvE2mWg=="], + + "@novu/react": ["@novu/react@3.10.1", "", { "dependencies": { "@novu/js": "3.10.1" }, "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-0", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-0" }, "optionalPeers": ["react-dom"] }, "sha512-fG76i7AzNyKWNqXIn1WyFnK7L2l1zVuaONj3tAia/iVjhnt7uNNRv6K46D0JPu34464+fK0/dOSbqqr89Tpk7w=="], + "@number-flow/react": ["@number-flow/react@0.5.10", "", { "dependencies": { "esm-env": "^1.1.4", "number-flow": "0.5.8" }, "peerDependencies": { "react": "^18 || ^19", "react-dom": "^18 || ^19" } }, "sha512-a8Wh5eNITn7Km4xbddAH7QH8eNmnduR6k34ER1hkHSGO4H2yU1DDnuAWLQM99vciGInFODemSc0tdxrXkJEpbA=="], "@nuxt/opencollective": ["@nuxt/opencollective@0.4.1", "", { "dependencies": { "consola": "^3.2.3" }, "bin": { "opencollective": "bin/opencollective.js" } }, "sha512-GXD3wy50qYbxCJ652bDrDzgMr3NFEkIS374+IgFQKkCvk9yiYcLvX2XDYr7UyQxf4wK0e+yqDYRubZ0DtOxnmQ=="], @@ -1674,6 +1706,30 @@ "@socket.io/component-emitter": ["@socket.io/component-emitter@3.1.2", "", {}, "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA=="], + "@solid-primitives/event-listener": ["@solid-primitives/event-listener@2.4.3", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-h4VqkYFv6Gf+L7SQj+Y6puigL/5DIi7x5q07VZET7AWcS+9/G3WfIE9WheniHWJs51OEkRB43w6lDys5YeFceg=="], + + "@solid-primitives/keyed": ["@solid-primitives/keyed@1.5.2", "", 
{ "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-BgoEdqPw48URnI+L5sZIHdF4ua4Las1eWEBBPaoSFs42kkhnHue+rwCBPL2Z9ebOyQ75sUhUfOETdJfmv0D6Kg=="], + + "@solid-primitives/map": ["@solid-primitives/map@0.4.13", "", { "dependencies": { "@solid-primitives/trigger": "^1.1.0" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-B1zyFbsiTQvqPr+cuPCXO72sRuczG9Swncqk5P74NCGw1VE8qa/Ry9GlfI1e/VdeQYHjan+XkbE3rO2GW/qKew=="], + + "@solid-primitives/media": ["@solid-primitives/media@2.3.3", "", { "dependencies": { "@solid-primitives/event-listener": "^2.4.3", "@solid-primitives/rootless": "^1.5.2", "@solid-primitives/static-store": "^0.1.2", "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-hQ4hLOGvfbugQi5Eu1BFWAIJGIAzztq9x0h02xgBGl2l0Jaa3h7tg6bz5tV1NSuNYVGio4rPoa7zVQQLkkx9dA=="], + + "@solid-primitives/props": ["@solid-primitives/props@3.2.2", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-lZOTwFJajBrshSyg14nBMEP0h8MXzPowGO0s3OeiR3z6nXHTfj0FhzDtJMv+VYoRJKQHG2QRnJTgCzK6erARAw=="], + + "@solid-primitives/refs": ["@solid-primitives/refs@1.1.2", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-K7tf2thy7L+YJjdqXspXOg5xvNEOH8tgEWsp0+1mQk3obHBRD6hEjYZk7p7FlJphSZImS35je3UfmWuD7MhDfg=="], + + "@solid-primitives/resize-observer": ["@solid-primitives/resize-observer@2.1.3", "", { "dependencies": { "@solid-primitives/event-listener": "^2.4.3", "@solid-primitives/rootless": "^1.5.2", "@solid-primitives/static-store": "^0.1.2", "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-zBLje5E06TgOg93S7rGPldmhDnouNGhvfZVKOp+oG2XU8snA+GoCSSCz1M+jpNAg5Ek2EakU5UVQqL152WmdXQ=="], + + "@solid-primitives/rootless": ["@solid-primitives/rootless@1.5.2", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-9HULb0QAzL2r47CCad0M+NKFtQ+LrGGNHZfteX/ThdGvKIg2o2GYhBooZubTCd/RTu2l2+Nw4s+dEfiDGvdrrQ=="], + + "@solid-primitives/static-store": ["@solid-primitives/static-store@0.1.2", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-ReK+5O38lJ7fT+L6mUFvUr6igFwHBESZF+2Ug842s7fvlVeBdIVEdTCErygff6w7uR6+jrr7J8jQo+cYrEq4Iw=="], + + "@solid-primitives/transition-group": ["@solid-primitives/transition-group@1.1.2", "", { "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-gnHS0OmcdjeoHN9n7Khu8KNrOlRc8a2weETDt2YT6o1zeW/XtUC6Db3Q9pkMU/9cCKdEmN4b0a/41MKAHRhzWA=="], + + "@solid-primitives/trigger": ["@solid-primitives/trigger@1.2.2", "", { "dependencies": { "@solid-primitives/utils": "^6.3.2" }, "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-IWoptVc0SWYgmpBPpCMehS5b07+tpFcvw15tOQ3QbXedSYn6KP8zCjPkHNzMxcOvOicTneleeZDP7lqmz+PQ6g=="], + + "@solid-primitives/utils": ["@solid-primitives/utils@6.3.2", "", { "peerDependencies": { "solid-js": "^1.6.12" } }, "sha512-hZ/M/qr25QOCcwDPOHtGjxTD8w2mNyVAYvcfgwzBHq2RwNqHNdDNsMZYap20+ruRwW4A3Cdkczyoz0TSxLCAPQ=="], + "@standard-schema/spec": ["@standard-schema/spec@1.0.0", "", {}, "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA=="], "@standard-schema/utils": ["@standard-schema/utils@0.3.0", "", {}, "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g=="], @@ -2984,6 +3040,8 @@ "etag": ["etag@1.8.1", "", {}, 
"sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="], + "event-target-polyfill": ["event-target-polyfill@0.0.4", "", {}, "sha512-Gs6RLjzlLRdT8X9ZipJdIZI/Y6/HhRLyq9RdDlCsnpxr/+Nn6bU2EFGuC94GjxqhM+Nmij2Vcq98yoHrU8uNFQ=="], + "event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="], "eventemitter3": ["eventemitter3@5.0.1", "", {}, "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA=="], @@ -3238,6 +3296,8 @@ "hastscript": ["hastscript@6.0.0", "", { "dependencies": { "@types/hast": "^2.0.0", "comma-separated-tokens": "^1.0.0", "hast-util-parse-selector": "^2.0.0", "property-information": "^5.0.0", "space-separated-tokens": "^1.0.0" } }, "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w=="], + "hey-listen": ["hey-listen@1.0.8", "", {}, "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q=="], + "highlight.js": ["highlight.js@10.7.3", "", {}, "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A=="], "highlightjs-vue": ["highlightjs-vue@1.0.0", "", {}, "sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA=="], @@ -4062,6 +4122,8 @@ "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], + "partysocket": ["partysocket@1.1.6", "", { "dependencies": { "event-target-polyfill": "^0.0.4" } }, "sha512-LkEk8N9hMDDsDT0iDK0zuwUDFVrVMUXFXCeN3850Ng8wtjPqPBeJlwdeY6ROlJSEh3tPoTTasXoSBYH76y118w=="], + "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], "path-is-absolute": ["path-is-absolute@1.0.1", "", {}, "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg=="], @@ -4480,6 +4542,10 @@ "serialize-javascript": ["serialize-javascript@6.0.2", "", { "dependencies": { "randombytes": "^2.1.0" } }, "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g=="], + "seroval": ["seroval@1.3.2", "", {}, "sha512-RbcPH1n5cfwKrru7v7+zrZvjLurgHhGyso3HTyGtRivGWgYjbOmGuivCQaORNELjNONoK35nj28EoWul9sb1zQ=="], + + "seroval-plugins": ["seroval-plugins@1.3.3", "", { "peerDependencies": { "seroval": "^1.0" } }, "sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w=="], + "serve-static": ["serve-static@2.2.0", "", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ=="], "server-only": ["server-only@0.0.1", "", {}, "sha512-qepMx2JxAa5jjfzxG79yPPq+8BuFToHd1hm7kI+Z4zAq1ftQiP7HcxMhDDItrbtwVeLg/cY2JnKnrcFkmiswNA=="], @@ -4548,6 +4614,16 @@ "socks-proxy-agent": ["socks-proxy-agent@8.0.5", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "^4.3.4", "socks": "^2.8.3" } }, "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw=="], + "solid-floating-ui": ["solid-floating-ui@0.3.1", "", { "peerDependencies": { "@floating-ui/dom": "^1.5", "solid-js": "^1.8" } }, "sha512-o/QmGsWPS2Z3KidAxP0nDvN7alI7Kqy0kU+wd85Fz+au5SYcnYm7I6Fk3M60Za35azsPX0U+5fEtqfOuk6Ao0Q=="], + + "solid-js": ["solid-js@1.9.9", "", { 
"dependencies": { "csstype": "^3.1.0", "seroval": "~1.3.0", "seroval-plugins": "~1.3.0" } }, "sha512-A0ZBPJQldAeGCTW0YRYJmt7RCeh5rbFfPZ2aOttgYnctHE7HgKeHCBB/PVc2P7eOfmNXqMFFFoYYdm3S4dcbkA=="], + + "solid-motionone": ["solid-motionone@1.0.4", "", { "dependencies": { "@motionone/dom": "^10.17.0", "@motionone/utils": "^10.17.0", "@solid-primitives/props": "^3.1.11", "@solid-primitives/refs": "^1.0.8", "@solid-primitives/transition-group": "^1.0.5", "csstype": "^3.1.3" }, "peerDependencies": { "solid-js": "^1.8.0" } }, "sha512-aqEjgecoO9raDFznu/dEci7ORSmA26Kjj9J4Cn1Gyr0GZuOVdvsNxdxClTL9J40Aq/uYFx4GLwC8n70fMLHiuA=="], + + "solid-presence": ["solid-presence@0.1.8", "", { "dependencies": { "@corvu/utils": "~0.4.0" }, "peerDependencies": { "solid-js": "^1.8" } }, "sha512-pWGtXUFWYYUZNbg5YpG5vkQJyOtzn2KXhxYaMx/4I+lylTLYkITOLevaCwMRN+liCVk0pqB6EayLWojNqBFECA=="], + + "solid-prevent-scroll": ["solid-prevent-scroll@0.1.10", "", { "dependencies": { "@corvu/utils": "~0.4.1" }, "peerDependencies": { "solid-js": "^1.8" } }, "sha512-KplGPX2GHiWJLZ6AXYRql4M127PdYzfwvLJJXMkO+CMb8Np4VxqDAg5S8jLdwlEuBis/ia9DKw2M8dFx5u8Mhw=="], + "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], "source-map": ["source-map@0.8.0-beta.0", "", { "dependencies": { "whatwg-url": "^7.0.0" } }, "sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA=="], @@ -5244,6 +5320,8 @@ "@next/eslint-plugin-next/fast-glob": ["fast-glob@3.3.1", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" } }, "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg=="], + "@novu/js/socket.io-client": ["socket.io-client@4.7.2", "", { "dependencies": { "@socket.io/component-emitter": "~3.1.0", "debug": "~4.3.2", "engine.io-client": "~6.5.2", "socket.io-parser": "~4.2.4" } }, "sha512-vtA0uD4ibrYD793SOIAwlo8cj6haOeMHrGvwPxJsxH7CeIksqJ+3Zc06RvWTIFgiSqx4A3sOnTXpfAEE2Zyz6w=="], + "@octokit/endpoint/@octokit/types": ["@octokit/types@14.1.0", "", { "dependencies": { "@octokit/openapi-types": "^25.1.0" } }, "sha512-1y6DgTy8Jomcpu33N+p5w58l6xyt55Ar2I91RPiIA0xCJBXyUAhXCcmZaDWSANiha7R9a6qJJ2CRomGPZ6f46g=="], "@octokit/graphql/@octokit/types": ["@octokit/types@14.1.0", "", { "dependencies": { "@octokit/openapi-types": "^25.1.0" } }, "sha512-1y6DgTy8Jomcpu33N+p5w58l6xyt55Ar2I91RPiIA0xCJBXyUAhXCcmZaDWSANiha7R9a6qJJ2CRomGPZ6f46g=="], @@ -6310,6 +6388,8 @@ "@next/eslint-plugin-next/fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], + "@novu/js/socket.io-client/debug": ["debug@4.3.7", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ=="], + "@octokit/endpoint/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@25.1.0", "", {}, "sha512-idsIggNXUKkk0+BExUn1dQ92sfysJrje03Q0bv0e+KPLrvyqZF8MnBpFz8UNfYDwB3Ie7Z0TByjWfzxt7vseaA=="], "@octokit/graphql/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@25.1.0", "", {}, "sha512-idsIggNXUKkk0+BExUn1dQ92sfysJrje03Q0bv0e+KPLrvyqZF8MnBpFz8UNfYDwB3Ie7Z0TByjWfzxt7vseaA=="], diff --git 
a/packages/email/emails/policy-review-notification.tsx b/packages/email/emails/policy-review-notification.tsx deleted file mode 100644 index 917cf532a..000000000 --- a/packages/email/emails/policy-review-notification.tsx +++ /dev/null @@ -1,117 +0,0 @@ -import { - Body, - Button, - Container, - Font, - Heading, - Html, - Link, - Preview, - Section, - Tailwind, - Text, -} from '@react-email/components'; -import { Footer } from '../components/footer'; -import { Logo } from '../components/logo'; - -interface Props { - email: string; - userName: string; - policyName: string; - organizationName: string; - organizationId: string; - policyId: string; -} - -export const PolicyReviewNotificationEmail = ({ - email, - userName, - policyName, - organizationName, - organizationId, - policyId, -}: Props) => { - const link = `${process.env.NEXT_PUBLIC_APP_URL ?? 'https://app.trycomp.ai'}/${organizationId}/policies/${policyId}`; - const subjectText = 'Policy review required'; - - return ( - - - - - - - - - {subjectText} - - - - - - {subjectText} - - - Hi {userName}, - - - The "{policyName}" policy for {organizationName} is due for review. Please review and publish. - - -
- -
- - - or copy and paste this URL into your browser{' '} - - {link} - - - -
-
- - This notification was intended for {email}. - -
- -
- -