This commit is contained in:
alma 2026-01-08 09:34:10 +01:00
parent 11f578dcfd
commit 85e0e94e82
60 changed files with 863 additions and 18554 deletions

.DS_Store (vendored): binary file not shown.


@@ -1,425 +0,0 @@
# Action Plan - Login Flow Improvements
## 🎯 Objectives
1. **Improve UX**: Allow natural SSO for legitimate users
2. **Secure logout**: Ensure credentials are requested again after logout
3. **Simplify the code**: Reduce the complexity of invalid-session detection
4. **Eliminate race conditions**: Robust mechanism to prevent auto-login after logout
---
## 📋 Immediate Actions (Do these first)
### Action 1: Remove the default `prompt=login` ⚡
**File**: `app/api/auth/options.ts`
**Change**:
```typescript
// BEFORE (lines 147-155)
authorization: {
  params: {
    scope: "openid profile email roles",
    prompt: "login" // ❌ Remove this line
  }
}

// AFTER
authorization: {
  params: {
    scope: "openid profile email roles",
    // prompt: "login" removed - will be added conditionally after logout
  }
}
```
**Impact**: ✅ SSO works naturally for legitimate users
---
### Action 2: Create an API route to flag logout ⚡
**New file**: `app/api/auth/mark-logout/route.ts`
```typescript
import { NextRequest, NextResponse } from 'next/server';

export async function POST(request: NextRequest) {
  const response = NextResponse.json({
    success: true,
    message: 'Logout marked successfully'
  });
  // HttpOnly cookie flagging the logout (5 minutes)
  response.cookies.set('force_login_prompt', 'true', {
    httpOnly: true,
    secure: process.env.NODE_ENV === 'production',
    sameSite: 'lax',
    path: '/',
    maxAge: 300 // 5 minutes
  });
  return response;
}
```
**Impact**: ✅ Robust mechanism to force login after logout
---
### Action 3: Update signout-handler to call the route ⚡
**File**: `components/auth/signout-handler.tsx`
**Change** (after line 25):
```typescript
// BEFORE
clearKeycloakCookies();

// AFTER
clearKeycloakCookies();
// Flag the logout server-side
try {
  await fetch('/api/auth/mark-logout', {
    method: 'POST',
    credentials: 'include',
  });
} catch (error) {
  console.error('Error marking logout:', error);
  // Continue even if this fails
}
```
**Repeat in**:
- `components/main-nav.tsx` (line ~377)
- `components/layout/layout-wrapper.tsx` (line ~42)
**Impact**: ✅ Server-side flag preventing auto-login
---
### Action 4: Simplify signin/page.tsx ⚡
**File**: `app/signin/page.tsx`
**Change**: Replace the complex logic (lines 17-67) with:
```typescript
useEffect(() => {
  // Check the server-set cookie that forces a login prompt
  const forceLoginCookie = document.cookie
    .split(';')
    .find(c => c.trim().startsWith('force_login_prompt='));

  // If the user just logged out, force prompt=login
  if (forceLoginCookie) {
    // Delete the cookie
    document.cookie = 'force_login_prompt=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;';
    // Do not auto-login; wait for a user click
    // The "Se connecter" button will force prompt=login
    return;
  }

  // If already authenticated, redirect
  if (status === "authenticated" && session?.user) {
    router.push("/");
    return;
  }

  // If unauthenticated and no logout flag, auto-login (natural SSO)
  if (status === "unauthenticated" && !forceLoginCookie) {
    const timer = setTimeout(() => {
      if (status === "unauthenticated") {
        signIn("keycloak", { callbackUrl: "/" });
      }
    }, 1000);
    return () => clearTimeout(timer);
  }
}, [status, session, router]);
```
**AND** update the "Se connecter" button (line ~202):
```typescript
<button
  onClick={() => {
    // Force prompt=login by adding a parameter
    // Note: NextAuth does not support prompt directly in signIn()
    // Solution: use a custom parameter in the URL
    const url = new URL(window.location.origin + '/api/auth/signin/keycloak');
    url.searchParams.set('callbackUrl', '/');
    url.searchParams.set('force_login', 'true');
    window.location.href = url.toString();
  }}
  className="mt-4 px-4 py-2 bg-blue-600 text-white rounded-lg hover:bg-blue-700"
>
  Se connecter
</button>
```
**Impact**: ✅ Simpler, more maintainable code
---
### Action 5: Add conditional prompt=login in options.ts ⚡
**File**: `app/api/auth/options.ts`
**Change**: Update the KeycloakProvider configuration to accept a custom parameter:
```typescript
KeycloakProvider({
  clientId: getRequiredEnvVar("KEYCLOAK_CLIENT_ID"),
  clientSecret: getRequiredEnvVar("KEYCLOAK_CLIENT_SECRET"),
  issuer: getRequiredEnvVar("KEYCLOAK_ISSUER"),
  authorization: {
    params: {
      scope: "openid profile email roles",
      // prompt will be added dynamically when force_login=true is in the URL
    }
  },
  // ... profile callback ...
})
```
**AND** create a custom signin route that adds prompt:
**New file**: `app/api/auth/signin/keycloak/route.ts`
```typescript
import { NextRequest, NextResponse } from 'next/server';
import { getServerSession } from 'next-auth/next';
import { authOptions } from '../../options';

export async function GET(request: NextRequest) {
  const searchParams = request.nextUrl.searchParams;
  const forceLogin = searchParams.get('force_login') === 'true';
  const callbackUrl = searchParams.get('callbackUrl') || '/';
  // Redirect to the NextAuth signin with prompt if needed
  const signinUrl = new URL('/api/auth/signin/keycloak', request.nextUrl.origin);
  signinUrl.searchParams.set('callbackUrl', callbackUrl);
  if (forceLogin) {
    // Add prompt=login to the Keycloak redirect URL
    // Note: NextAuth does not support this directly; the URL must be modified afterwards
    // Alternative: use middleware or modify the options dynamically
  }
  return NextResponse.redirect(signinUrl);
}
```
**OR** a simpler option: modify `options.ts` directly to read a cookie:
```typescript
// In options.ts, compute the authorization params dynamically
authorization: {
  params: (provider, action, request) => {
    const forceLogin = request?.cookies?.get('force_login_prompt')?.value === 'true';
    return {
      scope: "openid profile email roles",
      ...(forceLogin ? { prompt: "login" } : {}),
    };
  }
}
```
**Note**: NextAuth v4 does not support `params` as a function. Alternative:
**Modify** `app/api/auth/options.ts` to use `authorization.url`:
```typescript
KeycloakProvider({
  // ... config ...
  authorization: {
    params: {
      scope: "openid profile email roles",
    },
    // Add prompt dynamically via a custom URL
    url: (params) => {
      // Check whether login must be forced (via a cookie or another mechanism)
      const url = new URL(`${process.env.KEYCLOAK_ISSUER}/protocol/openid-connect/auth`);
      url.searchParams.set('client_id', process.env.KEYCLOAK_CLIENT_ID!);
      url.searchParams.set('redirect_uri', params.redirect_uri);
      url.searchParams.set('response_type', 'code');
      url.searchParams.set('scope', 'openid profile email roles');
      url.searchParams.set('state', params.state);
      // Add prompt if needed (checked via a cookie in the callback)
      // Note: more complex; the flag has to be passed through the OAuth state
      return url.toString();
    }
  }
})
```
**RECOMMENDED (simpler) solution**: Use a parameter in the callback URL and check it in the JWT callback:
```typescript
// In signin/page.tsx, when the "Se connecter" button is clicked
const url = new URL(window.location.origin + '/api/auth/signin/keycloak');
url.searchParams.set('callbackUrl', '/');
url.searchParams.set('force_login', 'true');
// Store in sessionStorage for the callback
sessionStorage.setItem('force_login', 'true');
window.location.href = url.toString();

// In options.ts (jwt callback), sessionStorage cannot be read server-side
// Solution: pass the flag through the OAuth state
```
**BETTER SOLUTION**: Set a cookie before calling signIn:
```typescript
// In signin/page.tsx, "Se connecter" button
onClick={() => {
  // Create a cookie to force login
  document.cookie = 'force_login_prompt=true; path=/; max-age=300';
  // Then a normal signIn
  signIn("keycloak", { callbackUrl: "/" });
}}

// In options.ts, read the cookie in the authorization params
// Note: NextAuth does not give access to cookies inside params
// Solution: middleware or a custom route
```
**FINAL RECOMMENDED SOLUTION**: Create a custom API route that handles signin with a conditional prompt:
```typescript
// app/api/auth/custom-signin/route.ts
import { NextRequest, NextResponse } from 'next/server';

export async function GET(request: NextRequest) {
  const searchParams = request.nextUrl.searchParams;
  const forceLogin = searchParams.get('force_login') === 'true';
  const callbackUrl = searchParams.get('callbackUrl') || '/';

  // Build the Keycloak URL, adding prompt if needed
  const keycloakIssuer = process.env.KEYCLOAK_ISSUER!;
  const clientId = process.env.KEYCLOAK_CLIENT_ID!;
  const redirectUri = `${request.nextUrl.origin}/api/auth/callback/keycloak`;

  const authUrl = new URL(`${keycloakIssuer}/protocol/openid-connect/auth`);
  authUrl.searchParams.set('client_id', clientId);
  authUrl.searchParams.set('redirect_uri', redirectUri);
  authUrl.searchParams.set('response_type', 'code');
  authUrl.searchParams.set('scope', 'openid profile email roles');
  authUrl.searchParams.set('state', generateState()); // Generate an OAuth state value
  if (forceLogin) {
    authUrl.searchParams.set('prompt', 'login');
  }
  return NextResponse.redirect(authUrl.toString());
}
```
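The route above calls a `generateState()` helper that is not defined anywhere in this plan. A minimal sketch, assuming Node's built-in `crypto` module (the helper name and where it lives are assumptions):
```typescript
// Hypothetical helper for the custom-signin route above; not part of the current codebase.
import { randomBytes } from 'crypto';

export function generateState(): string {
  // 32 random bytes, base64url-encoded, gives an unguessable opaque value
  // for the OAuth `state` parameter.
  return randomBytes(32).toString('base64url');
}
```
A route that bypasses NextAuth this way must also persist the state (for example in a short-lived cookie) and verify it on the callback, otherwise the CSRF protection that `state` provides is lost.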
**Impact**: ✅ Login is prompted only after a logout
---
## 🔧 Secondary Actions (After the immediate actions)
### Action 6: Configure NextAuth cookies explicitly
**File**: `app/api/auth/options.ts`
**Add** after `session: { ... }`:
```typescript
cookies: {
  sessionToken: {
    name: `next-auth.session-token`,
    options: {
      httpOnly: true,
      sameSite: 'lax',
      path: '/',
      secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
    },
  },
  // ... other cookies if needed
},
```
---
### Action 7: Improve the Keycloak logout URL
**Files**:
- `components/auth/signout-handler.tsx`
- `components/main-nav.tsx`
- `components/layout/layout-wrapper.tsx`
**Change** (lines ~58-76):
```typescript
// BEFORE
keycloakLogoutUrl.searchParams.append('kc_action', 'LOGOUT');

// AFTER
keycloakLogoutUrl.searchParams.append('kc_action', 'LOGOUT');
// Add client_id to force logout for this specific client
if (process.env.NEXT_PUBLIC_KEYCLOAK_CLIENT_ID) {
  keycloakLogoutUrl.searchParams.append('client_id',
    process.env.NEXT_PUBLIC_KEYCLOAK_CLIENT_ID);
}
```
---
### Action 8: Improve refresh token error handling
**File**: `app/api/auth/options.ts`
**Change**: See the details in `IMPROVEMENTS_LOGIN_FLOW.md`, section "Problème 7"
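The referenced section is not reproduced here, but one common NextAuth v4 pattern for refresh failures is sketched below: instead of throwing from the session callback, expose the error on the session and restart the OAuth flow client-side. The component and field names are illustrative assumptions, not the document's prescribed fix.
```typescript
"use client";

// Hypothetical sketch: react to "RefreshAccessTokenError" on the client.
// Assumes the session callback copies token.error onto session.error
// instead of throwing (an assumption, not the current behaviour).
import { useEffect } from "react";
import { signIn, useSession } from "next-auth/react";

export function RefreshErrorWatcher() {
  const { data: session } = useSession();

  useEffect(() => {
    const error = (session as unknown as { error?: string } | null)?.error;
    if (error === "RefreshAccessTokenError") {
      // The refresh token is no longer usable: restart the OAuth flow
      signIn("keycloak");
    }
  }, [session]);

  return null; // renders nothing; mount once inside the SessionProvider tree
}
```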
---
## ✅ Implementation Checklist
### Phase 1: Critical Fixes (1-2 hours)
- [ ] Action 1: Remove the default `prompt=login`
- [ ] Action 2: Create the `/api/auth/mark-logout` route
- [ ] Action 3: Update signout-handler to call the route
- [ ] Action 4: Simplify signin/page.tsx
- [ ] Action 5: Add conditional prompt=login
### Phase 2: Improvements (1 hour)
- [ ] Action 6: Configure cookies explicitly
- [ ] Action 7: Improve the Keycloak logout URL
- [ ] Action 8: Improve refresh error handling
### Phase 3: Tests (30 minutes)
- [ ] Test first-visit login (SSO must work)
- [ ] Test login after logout (credentials must be requested)
- [ ] Test logout from the dashboard
- [ ] Test logout from an iframe
- [ ] Test session expiration
---
## 🎯 Expected Outcome
### Before
- ❌ Credentials always requested (even on first visit)
- ❌ Complex invalid-session detection logic
- ❌ Possible race conditions
- ❌ Keycloak cookies can persist
### After
- ✅ Natural SSO for legitimate users
- ✅ Credentials requested only after logout
- ✅ Simple, robust invalid-session detection
- ✅ No race conditions
- ✅ Better cookie handling
---
**Document created on**: $(date)
**Priority**: Do the immediate actions first

AUDIT_API_N8N_CONNECTION.md (new file, 724 lines)

@@ -0,0 +1,724 @@
# 🔍 Senior Developer Audit - Next.js ↔️ N8N API Connection (Missions)
**Date**: $(date)
**Author**: Senior Developer Audit
**Goal**: Verify and document the connection between Next.js and N8N for mission management
---
## 📋 Table of Contents
1. [Global Architecture](#global-architecture)
2. [Communication Flow](#communication-flow)
3. [API Endpoints](#api-endpoints)
4. [Required Configuration](#required-configuration)
5. [Security](#security)
6. [Critical Points to Check](#critical-points-to-check)
7. [Potential Problems and Solutions](#potential-problems-and-solutions)
8. [Tests and Validation](#tests-and-validation)
9. [Recommendations](#recommendations)
---
## 🏗️ Global Architecture
### Overview
```
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ Next.js │────────▶│ N8N │────────▶│ Integrations│
│ (API) │ │ (Workflow) │ │ (Gitea, etc)│
└─────────────┘ └─────────────┘ └─────────────┘
│ │
│ │
└─────────────────────────┘
(Callback)
```
### Main Components
1. **Next.js API Routes**
- `POST /api/missions` - Create a mission
- `POST /api/missions/mission-created` - Callback from N8N
- `GET /api/missions` - List missions
2. **N8N Service** (`lib/services/n8n-service.ts`)
- Sends data to N8N
- Handles webhooks
- Handles errors
3. **N8N Workflows**
- Receiving webhook: `/webhook/mission-created`
- Creates the external integrations
- Calls back Next.js
---
## 🔄 Communication Flow
### 1. Creating a Mission (Next.js → N8N → Next.js)
```
┌─────────────────────────────────────────────────────────────────┐
│ STEP 1: Mission Creation in Next.js │
└─────────────────────────────────────────────────────────────────┘
POST /api/missions
1. Validate the data
2. Create the record in the database (Prisma)
3. Upload files (logo, attachments) to Minio
4. Verify the files
┌─────────────────────────────────────────────────────────────────┐
│ STEP 2: Send to N8N │
└─────────────────────────────────────────────────────────────────┘
POST https://brain.slm-lab.net/webhook/mission-created
Headers:
- Content-Type: application/json
- x-api-key: {N8N_API_KEY}
Body:
{
missionId: "uuid",
name: "...",
oddScope: [...],
services: [...],
config: {
N8N_API_KEY: "...",
MISSION_API_URL: "https://api.slm-lab.net/api"
},
...
}
┌─────────────────────────────────────────────────────────────────┐
│ STEP 3: N8N Processing │
└─────────────────────────────────────────────────────────────────┘
N8N Workflow:
1. Receive the webhook
2. Create the Gitea repository (if the "Gite" service is selected)
3. Create the Leantime project (if the "Leantime" service is selected)
4. Create the Outline collection (if the "Documentation" service is selected)
5. Create the RocketChat channel (if the "RocketChat" service is selected)
6. Prepare the callback payload
┌─────────────────────────────────────────────────────────────────┐
│ STEP 4: Callback N8N → Next.js │
└─────────────────────────────────────────────────────────────────┘
POST {MISSION_API_URL}/api/missions/mission-created
Headers:
- Content-Type: application/json
- x-api-key: {N8N_API_KEY} (from config.N8N_API_KEY)
Body:
{
missionId: "uuid",
gitRepoUrl: "...",
leantimeProjectId: "...",
documentationCollectionId: "...",
rocketchatChannelId: "..."
}
┌─────────────────────────────────────────────────────────────────┐
│ STEP 5: Mission Update in Next.js │
└─────────────────────────────────────────────────────────────────┘
Validate the API key
Look up the mission by missionId
Update the integration fields:
- giteaRepositoryUrl
- leantimeProjectId
- outlineCollectionId
- rocketChatChannelId
```
---
## 🔌 API Endpoints
### 1. POST /api/missions
**File**: `app/api/missions/route.ts`
**Purpose**: Create a new mission and trigger the N8N workflow
**Authentication**:
- User session required (via `getServerSession`)
- Check: `checkAuth(request)`
**Expected body**:
```typescript
{
  name: string;
  oddScope: string[];
  niveau?: string;
  intention?: string;
  missionType?: string;
  services?: string[];
  guardians?: Record<string, string>;
  volunteers?: string[];
  logo?: { data: string; name?: string; type?: string };
  attachments?: Array<{ data: string; name?: string; type?: string }>;
}
```
**Response**:
```json
{
  "success": true,
  "mission": { ... },
  "message": "Mission created successfully with all integrations"
}
```
**Critical points**:
- ✅ Mission is created in the database BEFORE the call to N8N
- ✅ Files are uploaded and verified BEFORE the call to N8N
- ✅ `missionId` is included in the data sent to N8N
- ✅ `config.N8N_API_KEY` and `config.MISSION_API_URL` are included
---
### 2. POST /api/missions/mission-created
**File**: `app/api/missions/mission-created/route.ts`
**Purpose**: Receive the integration IDs from N8N and update the mission
**Authentication**:
- **API Key** via the `x-api-key` header
- **NO** user session required (N8N has no session)
**Required headers**:
```
x-api-key: {N8N_API_KEY}
Content-Type: application/json
```
**Expected body**:
```typescript
{
  missionId: string; // ✅ Preferred (more reliable)
  // OR (fallback for backward compatibility)
  name: string;
  creatorId: string;
  // Integration IDs (optional)
  gitRepoUrl?: string;
  leantimeProjectId?: string | number;
  documentationCollectionId?: string;
  rocketchatChannelId?: string;
}
```
**Success response**:
```json
{
  "success": true,
  "message": "Mission updated successfully",
  "mission": {
    "id": "...",
    "name": "...",
    "giteaRepositoryUrl": "...",
    "leantimeProjectId": "...",
    "outlineCollectionId": "...",
    "rocketChatChannelId": "..."
  }
}
```
**Error codes**:
- `401` - Invalid or missing API key
- `400` - Missing required fields
- `404` - Mission not found
- `500` - Server error
**Critical points**:
- ✅ Strict API key validation
- ✅ Lookup by `missionId` (preferred) or `name + creatorId` (fallback)
- ✅ `leantimeProjectId` converted from number to string when needed
- ✅ Only the provided fields are updated
---
## ⚙️ Required Configuration
### Environment Variables
#### 1. N8N_API_KEY (REQUIRED)
```env
N8N_API_KEY=<your-n8n-api-key>
```
**Usage**:
- Sent to N8N in `config.N8N_API_KEY`
- N8N uses it to authenticate the callback
- Checked server-side in `/api/missions/mission-created`
**Where to configure it**:
- `.env.local` (development)
- Production environment variables (CapRover, Vercel, Docker, etc.)
**Check**:
```typescript
// Error if not set
if (!process.env.N8N_API_KEY) {
  logger.error('N8N_API_KEY is not set in environment variables');
}
```
---
#### 2. N8N_WEBHOOK_URL (Optional)
```env
N8N_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-created
```
**Default value**: `https://brain.slm-lab.net/webhook/mission-created`
**Usage**: N8N webhook URL for mission creation
---
#### 3. NEXT_PUBLIC_API_URL (Recommended)
```env
NEXT_PUBLIC_API_URL=https://api.slm-lab.net/api
```
**Usage**:
- Sent to N8N in `config.MISSION_API_URL`
- N8N uses it to build the callback URL
- Expected format: `{MISSION_API_URL}/api/missions/mission-created`
**Default value**: `https://api.slm-lab.net/api`
---
#### 4. N8N_ROLLBACK_WEBHOOK_URL (Optional)
```env
N8N_ROLLBACK_WEBHOOK_URL=https://brain.slm-lab.net/webhook/mission-rollback
```
**Usage**: N8N webhook URL for mission rollback
---
### N8N Workflow Configuration
#### Receiving Webhook
**Path**: `mission-created`
**Full URL**: `https://brain.slm-lab.net/webhook/mission-created`
**Method**: `POST`
**Status**: Must be **ACTIVE** (green toggle in N8N)
---
#### "Save Mission To API" Node
**URL**:
```
{{ $node['Process Mission Data'].json.config.MISSION_API_URL }}/api/missions/mission-created
```
**Method**: `POST`
**Headers**:
```
Content-Type: application/json
x-api-key: {{ $node['Process Mission Data'].json.config.N8N_API_KEY }}
```
**Body**:
```json
{
  "missionId": "{{ $node['Process Mission Data'].json.missionId }}",
  "gitRepoUrl": "{{ $node['Create Git Repo'].json.url }}",
  "leantimeProjectId": "{{ $node['Create Leantime Project'].json.id }}",
  "documentationCollectionId": "{{ $node['Create Outline Collection'].json.id }}",
  "rocketchatChannelId": "{{ $node['Create RocketChat Channel'].json.id }}"
}
```
**Critical points**:
- ✅ Use `config.MISSION_API_URL` (no hard-coded URL)
- ✅ Use `config.N8N_API_KEY` (no hard-coded key)
- ✅ Include `missionId` in the body
- ✅ Include every integration ID that was created
---
## 🔒 Security
### 1. API Key Authentication
**Mechanism**:
- N8N sends the `x-api-key` header
- Next.js compares it with `process.env.N8N_API_KEY`
- If they differ → `401 Unauthorized`
**Validation code** (`app/api/missions/mission-created/route.ts:42`):
```typescript
const apiKey = request.headers.get('x-api-key');
const expectedApiKey = process.env.N8N_API_KEY;
if (apiKey !== expectedApiKey) {
  logger.error('Invalid API key', {
    received: apiKey ? 'present' : 'missing',
    expected: expectedApiKey ? 'configured' : 'missing'
  });
  return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
}
```
**Critical points**:
- ✅ Strict comparison (no hashing, plaintext key)
- ✅ Invalid attempts are logged
- ✅ No fallback when the key is missing
---
### 2. API Key Transmission
**Flow**:
1. Next.js reads `process.env.N8N_API_KEY`
2. Next.js sends it to N8N in `config.N8N_API_KEY`
3. N8N holds it temporarily inside the workflow
4. N8N sends it back in the `x-api-key` header on the callback
**Risk**: If `N8N_API_KEY` is `undefined` at send time:
- N8N receives `undefined` or an empty string
- N8N sends an empty string in the header
- Next.js rejects the callback with `401`
**Solution**: Verify that `N8N_API_KEY` is defined before sending anything to N8N, as sketched below.
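A minimal sketch of such a guard, assuming it lives where the N8N payload's `config` block is built (for example `lib/services/n8n-service.ts`; the exact location and function name are assumptions):
```typescript
// Hypothetical pre-flight guard before triggering the N8N workflow.
function getN8nCallbackConfig() {
  const apiKey = process.env.N8N_API_KEY;
  if (!apiKey) {
    // Fail fast instead of sending an empty key that N8N would echo back,
    // which would make the callback fail with 401.
    throw new Error('N8N_API_KEY is not configured; refusing to trigger the N8N workflow');
  }
  return {
    N8N_API_KEY: apiKey,
    MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL ?? 'https://api.slm-lab.net/api',
  };
}
```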
---
### 3. Data Validation
**On the Next.js side**:
- ✅ Required fields are validated
- ✅ Mission lookup by `missionId` (safer than `name + creatorId`)
- ✅ Type conversion (number → string for `leantimeProjectId`)
**On the N8N side**:
- ⚠️ No validation visible from the Next.js code
- ⚠️ N8N must validate the data before creating the integrations
---
## ⚠️ Critical Points to Check
### 1. Environment Configuration
- [ ] `N8N_API_KEY` is defined in the environment
- [ ] `N8N_API_KEY` has the same value everywhere (dev, staging, prod)
- [ ] `NEXT_PUBLIC_API_URL` points to the right URL
- [ ] The application was restarted after the variables were changed
---
### 2. N8N Workflow
- [ ] The workflow is **ACTIVE** (green toggle)
- [ ] The webhook path is correct: `mission-created`
- [ ] The "Save Mission To API" node uses `config.MISSION_API_URL`
- [ ] The "Save Mission To API" node uses `config.N8N_API_KEY`
- [ ] The "Save Mission To API" node includes `missionId` in the body
- [ ] All integration IDs are included in the callback
---
### 3. Data Flow
- [ ] `missionId` is sent to N8N at creation time
- [ ] `missionId` is sent back by N8N in the callback
- [ ] The integration IDs are mapped correctly:
- `gitRepoUrl` → `giteaRepositoryUrl`
- `leantimeProjectId` → `leantimeProjectId` (string)
- `documentationCollectionId` → `outlineCollectionId`
- `rocketchatChannelId` → `rocketChatChannelId`
---
### 4. Error Handling
- [ ] N8N errors are logged
- [ ] Rollback on failure (if configured)
- [ ] Clear error messages for debugging
- [ ] No sensitive data in the logs
---
## 🐛 Potential Problems and Solutions
### Problem 1: 401 Unauthorized
**Symptoms**:
```
Invalid API key { received: 'present', expected: 'configured' }
```
**Possible causes**:
1. `N8N_API_KEY` is not defined in the environment
2. `N8N_API_KEY` differs between Next.js and N8N
3. N8N sends an empty or `undefined` key
**Solutions**:
1. Check that `N8N_API_KEY` is defined:
```bash
echo $N8N_API_KEY
```
2. Check the value in N8N:
- Open the workflow execution
- Check `config.N8N_API_KEY` in "Process Mission Data"
3. Make sure the same key is used everywhere
---
### Problem 2: 404 Mission Not Found
**Symptoms**:
```
Mission not found { missionId: "...", name: "...", creatorId: "..." }
```
**Possible causes**:
1. `missionId` not sent by N8N
2. `missionId` incorrect
3. Mission deleted in the meantime
**Solutions**:
1. Check that N8N sends `missionId`:
```json
{
  "missionId": "{{ $node['Process Mission Data'].json.missionId }}"
}
```
2. Check that Next.js sends `missionId` to N8N:
```typescript
config: {
  missionId: mission.id // ✅ Included in n8nData
}
```
3. Use the `name + creatorId` fallback if `missionId` is missing
---
### Problem 3: 500 Server Configuration Error
**Symptoms**:
```
N8N_API_KEY not configured in environment
```
**Cause**: `process.env.N8N_API_KEY` is `undefined`
**Solution**:
1. Add `N8N_API_KEY` to `.env.local` or the environment variables
2. Restart the application
3. Verify with a test endpoint
---
### Problem 4: 404 Webhook Not Registered
**Symptoms**:
```
404 Error: The requested webhook "mission-created" is not registered.
Hint: Click the 'Execute workflow' button on the canvas, then try again.
```
**Cause**: The N8N workflow is not active
**Solution**:
1. Open the workflow in N8N
2. Enable the "Active" toggle (it should turn green)
3. Check that the webhook node is active
---
### Problem 5: Integration IDs Not Saved
**Symptoms**:
- Mission created, but `giteaRepositoryUrl`, `leantimeProjectId`, etc. are `null`
**Possible causes**:
1. N8N does not call back `/api/missions/mission-created`
2. N8N calls back but with missing IDs
3. Error while updating the database
**Solutions**:
1. Check the N8N logs (Executions)
2. Check that the "Save Mission To API" node runs
3. Check the Next.js logs for "Mission Created Webhook Received"
4. Check that all IDs are included in the callback body
---
## 🧪 Tests and Validation
### Test 1: Check the Configuration
**Test endpoint** (to be created):
```typescript
// app/api/test-n8n-config/route.ts
import { NextResponse } from 'next/server';

export async function GET() {
  return NextResponse.json({
    hasN8NApiKey: !!process.env.N8N_API_KEY,
    n8nApiKeyLength: process.env.N8N_API_KEY?.length || 0,
    n8nWebhookUrl: process.env.N8N_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-created',
    missionApiUrl: process.env.NEXT_PUBLIC_API_URL || 'https://api.slm-lab.net/api'
  });
}
```
**Usage**: `GET /api/test-n8n-config`
---
### Test 2: Test the N8N Webhook
```bash
curl -X POST https://brain.slm-lab.net/webhook/mission-created \
  -H "Content-Type: application/json" \
  -d '{"test": "data"}'
```
**Expected results**:
- ✅ `200/400/500` with a workflow error: the webhook is active
- ❌ `404` with "webhook not registered": the webhook is inactive
---
### Test 3: Test the Callback Endpoint
```bash
curl -X POST https://api.slm-lab.net/api/missions/mission-created \
  -H "Content-Type: application/json" \
  -H "x-api-key: YOUR_N8N_API_KEY" \
  -d '{
    "missionId": "test-mission-id",
    "gitRepoUrl": "https://git.example.com/repo",
    "leantimeProjectId": "123"
  }'
```
**Expected results**:
- ✅ `200` with `success: true`: valid API key
- ❌ `401`: invalid API key
- ❌ `404`: mission not found (expected with a test missionId)
---
### Test 4: Create a Full Mission
1. Create a mission through the frontend
2. Check the Next.js logs:
- ✅ "Mission created successfully"
- ✅ "Starting N8N workflow"
- ✅ "N8N workflow result { success: true }"
3. Check the N8N logs (Executions):
- ✅ Workflow executed successfully
- ✅ "Save Mission To API" node executed
4. Check the database:
- ✅ The mission has its integration IDs saved
---
## 💡 Recommendations
### 1. Security Improvements
**Current issue**: Plaintext API key, simple comparison
**Recommendations**:
- [ ] Use a token system with expiration
- [ ] Implement HMAC request signing (see the sketch after this list)
- [ ] Add rate limiting on `/api/missions/mission-created`
- [ ] Log invalid access attempts with the source IP
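A minimal sketch of the HMAC idea, assuming a hypothetical `N8N_WEBHOOK_SECRET` shared secret and a hypothetical `x-n8n-signature` header that the workflow would compute over the raw body; none of this exists in the current code:
```typescript
// Hypothetical HMAC verification for the N8N → Next.js callback (not implemented today).
import { createHmac, timingSafeEqual } from 'crypto';
import { NextRequest, NextResponse } from 'next/server';

export async function POST(request: NextRequest) {
  const secret = process.env.N8N_WEBHOOK_SECRET; // hypothetical shared secret
  if (!secret) {
    return NextResponse.json({ error: 'Server configuration error' }, { status: 500 });
  }

  // The signature must be computed over the exact raw body N8N sent.
  const rawBody = await request.text();
  const received = request.headers.get('x-n8n-signature') ?? ''; // hypothetical header
  const expected = createHmac('sha256', secret).update(rawBody).digest('hex');

  const a = Buffer.from(received);
  const b = Buffer.from(expected);
  if (a.length !== b.length || !timingSafeEqual(a, b)) {
    return NextResponse.json({ error: 'Invalid signature' }, { status: 401 });
  }

  const payload = JSON.parse(rawBody);
  // ...continue with the existing mission-update logic using `payload`...
  return NextResponse.json({ success: true, missionId: payload.missionId });
}
```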
---
### 2. Robustness Improvements
**Current issue**: No automatic retry when N8N fails
**Recommendations**:
- [ ] Implement retries with exponential backoff (see the sketch after this list)
- [ ] Message queue for missed callbacks
- [ ] Health-check webhook to verify that N8N is reachable
- [ ] Configurable timeout for N8N calls
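A minimal retry sketch for the outgoing call to the N8N webhook, assuming it wraps the `fetch` inside `lib/services/n8n-service.ts` (the function name and retry policy are illustrative):
```typescript
// Hypothetical retry wrapper with exponential backoff for the N8N webhook call.
async function postWithRetry(url: string, init: RequestInit, maxAttempts = 3): Promise<Response> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      const response = await fetch(url, init);
      // Only retry on transient server-side failures (5xx); return everything else.
      if (response.status < 500) return response;
      lastError = new Error(`N8N responded with ${response.status}`);
    } catch (error) {
      lastError = error; // network error, DNS failure, timeout, ...
    }
    if (attempt < maxAttempts) {
      const delayMs = 1000 * 2 ** (attempt - 1); // 1s, 2s, 4s, ...
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
  throw lastError;
}
```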
---
### 3. Debugging Improvements
**Current issue**: Scattered logs, no end-to-end traceability
**Recommendations**:
- [ ] Add a `correlationId` to trace a mission end to end
- [ ] Structured logs with full context
- [ ] Monitoring dashboard for the integrations
- [ ] Alerts on repeated failures
---
### 4. Documentation Improvements
**Recommendations**:
- [ ] Document the exact format expected by N8N
- [ ] Provide complete payload examples
- [ ] Detailed sequence diagrams
- [ ] Troubleshooting guide with real cases
---
### 5. Automated Tests
**Recommendations**:
- [ ] Unit tests for `N8nService`
- [ ] Integration tests for the API endpoints
- [ ] E2E tests with a mocked N8N
- [ ] Load tests to verify scalability
---
## 📝 Quick Verification Checklist
### Configuration
- [ ] `N8N_API_KEY` defined and identical everywhere
- [ ] `NEXT_PUBLIC_API_URL` points to the right URL
- [ ] Application restarted after changes
### N8N Workflow
- [ ] Workflow active (green toggle)
- [ ] Webhook path: `mission-created`
- [ ] "Save Mission To API" node configured correctly
- [ ] `missionId` included in the callback
### Next.js Code
- [ ] `missionId` sent to N8N at creation time
- [ ] API key validation works
- [ ] Field mapping is correct
- [ ] Appropriate error handling
### Tests
- [ ] Mission creation test passes
- [ ] Integration IDs saved in the database
- [ ] No critical errors in the logs
---
## 🔗 References
- **N8N service**: `lib/services/n8n-service.ts`
- **Creation endpoint**: `app/api/missions/route.ts`
- **Callback endpoint**: `app/api/missions/mission-created/route.ts`
- **N8N documentation**: See the `N8N_*.md` files in the project
---
**Document created on**: $(date)
**Last updated**: $(date)
**Version**: 1.0

@@ -1,224 +0,0 @@
# Authentication Flow Fixes
## Issues Fixed
### 1. Logout Loop Issue ✅
**Problem**:
- User couldn't log out - infinite redirect loop
- Sign-in page auto-triggered Keycloak login even when user was already authenticated
- Keycloak session cookies weren't cleared, causing immediate re-authentication
**Root Cause**:
- `/signin` page had `useEffect(() => { signIn("keycloak") }, [])` that always triggered login
- No check for existing authentication status
- Keycloak logout endpoint was never called, leaving Keycloak cookies valid
**Fix Applied**:
1. **Sign-in page** (`app/signin/page.tsx`):
- Added check for existing session before triggering login
- If user is already authenticated, redirect to home
- Only trigger Keycloak login if status is "unauthenticated"
2. **Sign-out handler** (`components/auth/signout-handler.tsx`):
- Now properly calls Keycloak logout endpoint
- Uses ID token for proper logout
- Clears both NextAuth and Keycloak cookies
3. **Main navigation logout** (`components/main-nav.tsx`):
- Fixed to use `idToken` instead of `accessToken` for Keycloak logout
- Proper logout flow with Keycloak endpoint
---
### 2. Iframe Applications Logging Out ✅
**Problem**:
- Iframe applications were logging out even when user was still authenticated in dashboard
- Desynchronization between NextAuth session and Keycloak session
**Root Cause**:
- Sign-out only cleared NextAuth cookies
- Keycloak session cookies remained valid but could expire independently
- Iframe apps rely on Keycloak cookies for SSO
- When Keycloak cookies expired/invalidated, iframes logged out but dashboard stayed logged in
**Fix Applied**:
1. **ID Token Storage** (`app/api/auth/options.ts`):
- Now stores `idToken` from Keycloak in JWT
- Exposes `idToken` in session object
- Preserves ID token during token refresh
2. **Proper Keycloak Logout**:
- Sign-out now calls Keycloak logout endpoint with `id_token_hint`
- This properly invalidates Keycloak session and clears Keycloak cookies
- Ensures synchronization between dashboard and iframe apps
3. **Type Definitions** (`types/next-auth.d.ts`):
- Added `idToken` to Session and JWT interfaces
- Type-safe access to ID token
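A sketch of what that augmentation looks like in `types/next-auth.d.ts`, based on the fields named in this document (any fields beyond `idToken` are assumptions drawn from the rest of the audit notes in this repo):
```typescript
// types/next-auth.d.ts (sketch)
import "next-auth";
import "next-auth/jwt";

declare module "next-auth" {
  interface Session {
    idToken?: string;     // used for Keycloak logout (id_token_hint)
    accessToken?: string; // Keycloak access token exposed to the client
  }
}

declare module "next-auth/jwt" {
  interface JWT {
    idToken?: string;
    accessToken?: string;
    refreshToken?: string;
    accessTokenExpires?: number;
    error?: string; // e.g. "RefreshAccessTokenError"
  }
}
```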
---
## Changes Made
### Files Modified
1. **`app/api/auth/options.ts`**
- Added `idToken` to JWT interface
- Store `account.id_token` in JWT during initial authentication
- Expose `idToken` in session callback
- Preserve `idToken` during token refresh
2. **`app/signin/page.tsx`**
- Added session status check
- Prevent auto-login if already authenticated
- Redirect authenticated users to home
3. **`components/auth/signout-handler.tsx`**
- Call Keycloak logout endpoint with ID token
- Proper logout flow that clears both NextAuth and Keycloak sessions
4. **`components/main-nav.tsx`**
- Fixed logout button to use `idToken` instead of `accessToken`
- Proper Keycloak logout flow
5. **`types/next-auth.d.ts`**
- Added `idToken?: string` to Session interface
- Added `idToken?: string` to JWT interface (both modules)
---
## How It Works Now
### Sign-In Flow (Fixed)
```
1. User navigates to /signin
2. Check session status:
- If authenticated → Redirect to /
- If unauthenticated → Trigger Keycloak login
3. After Keycloak authentication:
- Store tokens (access, refresh, ID token)
- Initialize storage
- Redirect to dashboard
```
### Sign-Out Flow (Fixed)
```
1. User clicks logout
2. Sign out from NextAuth (clears NextAuth cookies)
3. Call Keycloak logout endpoint:
- URL: ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
- Parameters:
* post_logout_redirect_uri: /signin
* id_token_hint: <ID token from session>
4. Keycloak clears its session and cookies
5. Redirect to /signin (no auto-login loop)
```
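A condensed sketch of steps 2-3 above as a client-side helper; it mirrors the flow but is not the repository's exact `signout-handler.tsx` (the function name is illustrative, and `NEXT_PUBLIC_KEYCLOAK_ISSUER` must be set):
```typescript
// Sketch of the fixed sign-out sequence.
import { signOut } from "next-auth/react";

export async function logoutEverywhere(idToken?: string) {
  // 1. Clear the NextAuth session without an immediate redirect
  await signOut({ redirect: false });

  // 2. End the Keycloak SSO session so iframe apps are logged out too
  const issuer = process.env.NEXT_PUBLIC_KEYCLOAK_ISSUER;
  if (!issuer) {
    window.location.href = "/signin";
    return;
  }

  const logoutUrl = new URL(`${issuer}/protocol/openid-connect/logout`);
  logoutUrl.searchParams.set(
    "post_logout_redirect_uri",
    `${window.location.origin}/signin`
  );
  if (idToken) {
    // Tells Keycloak exactly which session to invalidate
    logoutUrl.searchParams.set("id_token_hint", idToken);
  }
  window.location.href = logoutUrl.toString();
}
```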
### Iframe SSO (Fixed)
```
1. User authenticates in dashboard
2. Keycloak sets session cookies
3. Iframe apps read Keycloak cookies
4. When user logs out:
- Keycloak logout endpoint is called
- Keycloak cookies are cleared
- Iframe apps lose access (synchronized logout)
```
---
## Environment Variables Required
Ensure these are set:
```bash
# Required for logout
NEXT_PUBLIC_KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
# Already required for authentication
KEYCLOAK_CLIENT_ID=neah-dashboard
KEYCLOAK_CLIENT_SECRET=<secret>
KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
NEXTAUTH_URL=https://dashboard.example.com
NEXTAUTH_SECRET=<secret>
```
**Important**: `NEXT_PUBLIC_KEYCLOAK_ISSUER` must be set for client-side logout to work.
---
## Testing Checklist
### Logout Flow
- [ ] Click logout button
- [ ] Should redirect to Keycloak logout
- [ ] Should redirect back to /signin
- [ ] Should NOT auto-login (no loop)
- [ ] Should be able to manually log in again
### Sign-In Flow
- [ ] Navigate to /signin when not authenticated
- [ ] Should trigger Keycloak login
- [ ] Navigate to /signin when already authenticated
- [ ] Should redirect to / (no auto-login trigger)
### Iframe SSO
- [ ] Log in to dashboard
- [ ] Open iframe application
- [ ] Should be automatically authenticated
- [ ] Log out from dashboard
- [ ] Iframe application should also lose authentication
- [ ] Refresh iframe - should require login
---
## Additional Notes
### ID Token vs Access Token
- **Access Token**: Used for API calls to Keycloak-protected resources
- **ID Token**: Used for user identification and logout
- **Refresh Token**: Used to get new access tokens
The ID token is required for proper Keycloak logout. It tells Keycloak which session to invalidate.
### Cookie Synchronization
The fix ensures that:
1. NextAuth cookies are cleared (dashboard logout)
2. Keycloak cookies are cleared (via logout endpoint)
3. Both happen in sequence, maintaining synchronization
### Token Refresh
During token refresh, the ID token is preserved (Keycloak doesn't issue new ID tokens on refresh). This ensures logout continues to work even after token refreshes.
---
## Troubleshooting
### If logout still loops:
1. Check browser console for errors
2. Verify `NEXT_PUBLIC_KEYCLOAK_ISSUER` is set correctly
3. Check that Keycloak logout endpoint is accessible
4. Verify ID token is present in session: `console.log(session?.idToken)`
### If iframes still log out independently:
1. Check Keycloak cookie domain configuration
2. Verify iframe apps are configured to use same Keycloak realm
3. Check browser cookie settings (third-party cookies may be blocked)
4. Verify Keycloak session timeout settings
---
**Date**: 2024
**Status**: ✅ Fixed
**Version**: 1.0

@@ -1,988 +0,0 @@
# Authentication Flow Audit - NextAuth with Keycloak & SSO for Iframe Applications
## Executive Summary
This document provides a comprehensive audit of the authentication architecture in the Neah dashboard application. The system uses **NextAuth.js v4** with **Keycloak** as the OAuth provider, implementing JWT-based sessions and supporting Single Sign-On (SSO) for multiple iframe-embedded applications via cookie-based authentication.
---
## Architecture Overview
### Components
1. **NextAuth.js** - Authentication framework
2. **Keycloak** - Identity Provider (IdP) via OAuth 2.0/OpenID Connect
3. **JWT Strategy** - Session management (no database sessions)
4. **Iframe Applications** - Multiple embedded applications using SSO via cookies
5. **Keycloak Admin Client** - Server-side user management
---
## 1. Authentication Entry Points
### 1.1 Sign-In Page (`/app/signin/page.tsx`)
**Location**: `app/signin/page.tsx`
**Flow**:
```typescript
1. User navigates to /signin
2. Component automatically triggers: signIn("keycloak", { callbackUrl: "/" })
3. Redirects to Keycloak authorization endpoint
4. After Keycloak authentication, initializes storage via /api/storage/init
5. Redirects to home page
```
**Key Methods**:
- `signIn("keycloak")` - NextAuth client-side method
- Automatic redirect to Keycloak OAuth flow
- Storage initialization after successful authentication
**Dependencies**:
- `next-auth/react` - Client-side NextAuth hooks
- Storage API endpoint for user space initialization
---
## 2. NextAuth Configuration
### 2.1 Route Handler (`/app/api/auth/[...nextauth]/route.ts`)
**Location**: `app/api/auth/[...nextauth]/route.ts`
**Purpose**: NextAuth API route handler for all authentication endpoints
**Endpoints Handled**:
- `GET/POST /api/auth/signin` - Sign in
- `GET/POST /api/auth/signout` - Sign out
- `GET /api/auth/session` - Get current session
- `GET /api/auth/csrf` - CSRF token
- `GET /api/auth/providers` - Available providers
- `GET /api/auth/callback/keycloak` - OAuth callback
**Implementation**:
```typescript
import NextAuth from "next-auth";
import { authOptions } from "../options";
const handler = NextAuth(authOptions);
export { handler as GET, handler as POST };
```
---
### 2.2 Auth Options Configuration (`/app/api/auth/options.ts`)
**Location**: `app/api/auth/options.ts`
**This is the core authentication configuration file.**
#### 2.2.1 Keycloak Provider Setup
```typescript
KeycloakProvider({
clientId: getRequiredEnvVar("KEYCLOAK_CLIENT_ID"),
clientSecret: getRequiredEnvVar("KEYCLOAK_CLIENT_SECRET"),
issuer: getRequiredEnvVar("KEYCLOAK_ISSUER"),
authorization: {
params: {
scope: "openid profile email roles" // Requested OAuth scopes
}
},
profile(profile) { /* Profile transformation */ }
})
```
**Environment Variables Required**:
- `KEYCLOAK_CLIENT_ID` - OAuth client identifier
- `KEYCLOAK_CLIENT_SECRET` - OAuth client secret
- `KEYCLOAK_ISSUER` - Keycloak realm issuer URL (e.g., `https://keycloak.example.com/realms/neah`)
**OAuth Scopes Requested**:
- `openid` - OpenID Connect core
- `profile` - User profile information
- `email` - User email address
- `roles` - User roles from Keycloak realm
#### 2.2.2 Profile Callback
**Location**: Lines 109-137 in `options.ts`
**Purpose**: Transforms Keycloak user profile into NextAuth user object
**Process**:
1. Receives Keycloak profile with `realm_access.roles`
2. Extracts roles from `realm_access.roles` array
3. Cleans roles by:
- Removing `ROLE_` prefix (if present)
- Converting to lowercase
4. Maps Keycloak profile fields to NextAuth user:
- `sub``id`
- `name` or `preferred_username``name`
- `email``email`
- `given_name``first_name`
- `family_name``last_name`
- `preferred_username``username`
- Cleaned roles → `role[]`
**Code Flow**:
```typescript
profile(profile) {
const roles = profile.realm_access?.roles || [];
const cleanRoles = roles.map((role: string) =>
role.replace(/^ROLE_/, '').toLowerCase()
);
return {
id: profile.sub,
name: profile.name ?? profile.preferred_username,
email: profile.email,
first_name: profile.given_name ?? '',
last_name: profile.family_name ?? '',
username: profile.preferred_username ?? profile.email?.split('@')[0] ?? '',
role: cleanRoles,
}
}
```
#### 2.2.3 Session Configuration
**Location**: Lines 140-143
```typescript
session: {
strategy: "jwt", // JWT-based sessions (no database)
maxAge: 30 * 24 * 60 * 60, // 30 days
}
```
**Characteristics**:
- **Strategy**: JWT (stateless, no database lookups)
- **Max Age**: 30 days (2,592,000 seconds)
- **Storage**: Encrypted JWT stored in HTTP-only cookies
#### 2.2.4 JWT Callback
**Location**: Lines 145-181
**Purpose**: Handles JWT token creation and refresh
**Flow**:
**Initial Authentication (account & profile present)**:
```typescript
if (account && profile) {
1. Extract roles from Keycloak profile
2. Clean roles (remove ROLE_ prefix, lowercase)
3. Store in JWT token:
- accessToken: account.access_token (Keycloak access token)
- refreshToken: account.refresh_token (Keycloak refresh token)
- accessTokenExpires: account.expires_at (expiration timestamp)
- sub: Keycloak user ID
- role: cleaned roles array
- username, first_name, last_name: from profile
}
```
**Subsequent Requests (token refresh check)**:
```typescript
else if (token.accessToken) {
1. Decode JWT to extract roles (if not already in token)
2. Check if token is expired:
- If expired: Call refreshAccessToken()
- If valid: Return existing token
}
```
**Token Expiration Check**:
```typescript
if (Date.now() < (token.accessTokenExpires as number) * 1000) {
return token; // Token still valid
}
return refreshAccessToken(token); // Token expired, refresh
```
**Note**: The units here deserve attention. On initial sign-in, `accessTokenExpires` is `account.expires_at`, which is in seconds since epoch, so the `* 1000` is what makes it comparable to `Date.now()`. However, `refreshAccessToken()` (section 2.2.5) stores `Date.now() + expires_in * 1000`, which is already in milliseconds; after a refresh, the same `* 1000` pushes the computed expiry far into the future and the token is never refreshed again. See section 10.1.
#### 2.2.5 Token Refresh Function
**Location**: Lines 64-96
**Purpose**: Refreshes expired Keycloak access tokens
**Implementation**:
```typescript
async function refreshAccessToken(token: JWT) {
1. POST to Keycloak token endpoint:
- URL: ${KEYCLOAK_ISSUER}/protocol/openid-connect/token
- Method: POST
- Body:
* client_id: KEYCLOAK_CLIENT_ID
* client_secret: KEYCLOAK_CLIENT_SECRET
* grant_type: refresh_token
* refresh_token: token.refreshToken
2. On Success:
- Update accessToken
- Update refreshToken (if new one provided)
- Update accessTokenExpires: Date.now() + expires_in * 1000
3. On Error:
- Set token.error = "RefreshAccessTokenError"
- Return token with error flag
}
```
**Error Handling**: Sets `token.error` flag which is checked in session callback
#### 2.2.6 Session Callback
**Location**: Lines 182-202
**Purpose**: Transforms JWT token into session object for client-side use
**Flow**:
```typescript
async session({ session, token }) {
1. Check for refresh errors:
if (token.error) throw new Error(token.error)
2. Build session.user object:
- id: token.sub (Keycloak user ID)
- email: token.email
- name: token.name
- image: null
- username: token.username
- first_name: token.first_name
- last_name: token.last_name
- role: token.role (array)
- nextcloudInitialized: false (default)
3. Add accessToken to session:
session.accessToken = token.accessToken
4. Return session
}
```
**Important**: The `accessToken` (Keycloak OAuth token) is exposed in the session object, making it available client-side via `useSession()` hook.
#### 2.2.7 Custom Pages
**Location**: Lines 204-207
```typescript
pages: {
signIn: '/signin',
error: '/signin',
}
```
**Custom Routes**:
- Sign-in page: `/signin` (instead of default `/api/auth/signin`)
- Error page: `/signin` (redirects to sign-in on errors)
---
## 3. Authentication Flow Step-by-Step
### 3.1 Initial Sign-In Flow
```
┌─────────────┐
│ Browser │
└──────┬──────┘
│ 1. GET /signin
┌─────────────────────┐
│ /app/signin/page.tsx │
│ - Auto-triggers │
│ signIn("keycloak") │
└──────┬──────────────┘
│ 2. Redirect to NextAuth
┌──────────────────────────────┐
│ /api/auth/signin/keycloak │
│ - Generates OAuth state │
│ - Redirects to Keycloak │
└──────┬───────────────────────┘
│ 3. GET /realms/{realm}/protocol/openid-connect/auth
│ ?client_id=...
│ &redirect_uri=...
│ &response_type=code
│ &scope=openid profile email roles
│ &state=...
┌─────────────────────┐
│ Keycloak Server │
│ - Login page │
│ - User credentials │
└──────┬──────────────┘
│ 4. User authenticates
│ 5. POST /realms/{realm}/protocol/openid-connect/token
│ (Authorization code exchange)
│ 6. Keycloak returns:
│ - access_token
│ - refresh_token
│ - id_token
│ - expires_in
┌──────────────────────────────┐
│ /api/auth/callback/keycloak │
│ - Receives authorization code│
│ - Exchanges for tokens │
│ - Fetches user profile │
└──────┬───────────────────────┘
│ 7. JWT Callback
│ - Stores tokens in JWT
│ - Extracts user info
│ - Cleans roles
│ 8. Session Callback
│ - Builds session object
│ 9. Sets NextAuth cookies:
│ - next-auth.session-token (encrypted JWT)
│ - next-auth.csrf-token
┌─────────────────────┐
│ Browser (Client) │
│ - Cookies set │
│ - Redirect to / │
└──────┬──────────────┘
│ 10. GET / (home page)
│ - getServerSession() validates JWT
│ - Session available
┌─────────────────────┐
│ Dashboard Loaded │
└─────────────────────┘
```
### 3.2 Subsequent Request Flow (Authenticated)
```
┌─────────────┐
│ Browser │
└──────┬──────┘
│ 1. GET /any-page
│ Cookie: next-auth.session-token=...
┌──────────────────────────────┐
│ Next.js Server │
│ getServerSession(authOptions)│
└──────┬───────────────────────┘
│ 2. Decrypt JWT from cookie
│ 3. Check token expiration
│ 4a. If valid:
│ - Extract user info
│ - Return session
│ 4b. If expired:
│ - Call refreshAccessToken()
│ - POST to Keycloak /token
│ - Update JWT with new tokens
│ - Return session
┌─────────────────────┐
│ Page Component │
│ - session available│
└─────────────────────┘
```
### 3.3 Token Refresh Flow
```
┌─────────────────────┐
│ JWT Callback │
│ (Token expired) │
└──────┬──────────────┘
│ 1. Call refreshAccessToken()
│ 2. POST ${KEYCLOAK_ISSUER}/protocol/openid-connect/token
│ Body:
│ - client_id
│ - client_secret
│ - grant_type: refresh_token
│ - refresh_token: <current_refresh_token>
┌─────────────────────┐
│ Keycloak Server │
│ - Validates refresh│
│ token │
│ - Issues new tokens│
└──────┬──────────────┘
│ 3. Returns:
│ - access_token (new)
│ - refresh_token (new, optional)
│ - expires_in
┌─────────────────────┐
│ Update JWT Token │
│ - New accessToken │
│ - New refreshToken │
│ - New expires time │
└──────┬──────────────┘
│ 4. Return updated token
│ 5. Session callback builds session
┌─────────────────────┐
│ Session Available │
└─────────────────────┘
```
---
## 4. Iframe SSO Architecture
### 4.1 Overview
The dashboard embeds multiple applications in iframes. These applications rely on **cookie-based SSO** to authenticate users automatically using the Keycloak session established in the parent dashboard.
### 4.2 Iframe Application Pages
**Pattern**: All iframe pages follow the same structure:
```typescript
// Example: app/parole/page.tsx
export default async function Page() {
const session = await getServerSession(authOptions);
if (!session) {
redirect("/signin");
}
return (
<ResponsiveIframe
src={process.env.NEXT_PUBLIC_IFRAME_PAROLE_URL || ''}
/>
);
}
```
**Iframe Applications Identified**:
1. **Parole** (`/parole`) - `NEXT_PUBLIC_IFRAME_PAROLE_URL`
2. **Agilite** (`/agilite`) - `NEXT_PUBLIC_IFRAME_AGILITY_URL`
3. **Alma** (`/alma`) - `NEXT_PUBLIC_IFRAME_AI_ASSISTANT_URL`
4. **Vision** (`/vision`) - `NEXT_PUBLIC_IFRAME_CONFERENCE_URL`
5. **The Message** (`/the-message`) - `NEXT_PUBLIC_IFRAME_THEMESSAGE_URL`
6. **WP Admin** (`/wp-admin`) - `NEXT_PUBLIC_IFRAME_MISSIONVIEW_URL`
7. **Mediation** (`/mediation`) - `NEXT_PUBLIC_IFRAME_MEDIATIONS_URL`
8. **Apprendre** (`/apprendre`) - `NEXT_PUBLIC_IFRAME_LEARN_URL`
9. **Gite** (`/gite`) - `NEXT_PUBLIC_IFRAME_GITE_URL`
10. **Artlab** (`/artlab`) - `NEXT_PUBLIC_IFRAME_ARTLAB_URL`
11. **Calcul** (`/calcul`) - `NEXT_PUBLIC_IFRAME_CALCULATION_URL`
12. **Chapitre** (`/chapitre`) - `NEXT_PUBLIC_IFRAME_CHAPTER_URL`
13. **Dossiers** (`/dossiers`) - `NEXT_PUBLIC_IFRAME_DRIVE_URL`
14. **CRM** (`/crm`) - `NEXT_PUBLIC_IFRAME_MEDIATIONS_URL`
15. **Livres** (`/livres`) - `NEXT_PUBLIC_IFRAME_LIVRE_URL`
16. **Showcase** (`/showcase`) - `NEXT_PUBLIC_IFRAME_SHOWCASE_URL`
17. **Radio** (`/radio`) - `NEXT_PUBLIC_IFRAME_RADIO_URL`
18. **Press** (`/press`) - `NEXT_PUBLIC_IFRAME_SHOWCASE_URL`
19. **Observatory** - `NEXT_PUBLIC_IFRAME_OBSERVATORY_URL`
20. **Time Tracker** - `NEXT_PUBLIC_IFRAME_TIMETRACKER_URL`
21. **Missions Board** - `NEXT_PUBLIC_IFRAME_MISSIONSBOARD_URL`
22. **Carnet** - `NEXT_PUBLIC_IFRAME_CARNET_URL`
### 4.3 SSO Cookie Mechanism
**How It Works**:
1. **Parent Dashboard Authentication**:
- User authenticates via Keycloak in the dashboard
- Keycloak sets authentication cookies (domain: Keycloak domain)
- NextAuth sets session cookies (domain: dashboard domain)
2. **Iframe Cookie Sharing**:
- When iframe loads, browser sends cookies for the iframe's domain
- If iframe application is on **same domain** or **subdomain** of Keycloak:
- Keycloak cookies are automatically sent
- Application can read Keycloak session cookies
- SSO works automatically
3. **Cross-Domain Considerations**:
- If iframe apps are on different domains, they need:
- Same Keycloak realm configuration
- Proper CORS settings
- Cookie domain configuration in Keycloak
- `SameSite=None; Secure` cookie attributes for cross-site
### 4.4 ResponsiveIframe Component
**Location**: `app/components/responsive-iframe.tsx`
**Features**:
- Auto-resizing based on viewport
- Hash synchronization (URL fragments)
- Full-screen support
**Important**: This component does **NOT** handle authentication - it's purely presentational. SSO relies on browser cookie behavior.
---
## 5. Sign-Out Flow
### 5.1 Sign-Out Page
**Location**: `app/signout/page.tsx`
**Implementation**:
```typescript
export default function SignOut() {
return (
<div>
<SignOutHandler />
<p>Déconnexion en cours...</p>
</div>
);
}
```
### 5.2 Sign-Out Handler
**Location**: `components/auth/signout-handler.tsx`
**Flow**:
```typescript
1. clearAuthCookies() - Clears NextAuth cookies client-side
2. signOut({ callbackUrl: "/signin", redirect: true })
- Calls NextAuth signout endpoint
- Invalidates session
- Redirects to /signin
```
### 5.3 Cookie Clearing
**Location**: `lib/session.ts` - `clearAuthCookies()`
**Implementation**:
```typescript
export function clearAuthCookies() {
const cookies = document.cookie.split(';');
for (const cookie of cookies) {
const [name] = cookie.split('=');
if (name.trim().startsWith('next-auth.') ||
name.trim().startsWith('__Secure-next-auth.') ||
name.trim().startsWith('__Host-next-auth.')) {
document.cookie = `${name.trim()}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
}
}
}
```
**Note**: This only clears NextAuth cookies. Keycloak cookies remain unless:
- User manually logs out of Keycloak
- Keycloak session expires
- Application calls Keycloak logout endpoint
### 5.4 Service Token Invalidation
**Location**: `lib/session.ts` - `invalidateServiceTokens()`
**Purpose**: Logs out from integrated services (RocketChat, Leantime, etc.)
**Services Handled**:
- RocketChat: `/api/v1/logout`
- Leantime: JSON-RPC logout method
**Note**: This function exists but does not appear to be called during the standard sign-out flow (see section 10.6).
---
## 6. Server-Side Session Access
### 6.1 getServerSession()
**Usage Pattern** (seen in all iframe pages):
```typescript
import { getServerSession } from "next-auth/next";
import { authOptions } from "@/app/api/auth/options";
const session = await getServerSession(authOptions);
```
**How It Works**:
1. Reads `next-auth.session-token` cookie from request
2. Decrypts JWT using `NEXTAUTH_SECRET`
3. Validates token signature and expiration
4. If expired, triggers refresh (via JWT callback)
5. Returns session object
**Location**: Used in:
- All iframe page components
- Root layout (`app/layout.tsx`)
- Any server component needing authentication
### 6.2 Client-Side Session Access
**Usage Pattern**:
```typescript
import { useSession } from "next-auth/react";
const { data: session, status } = useSession();
```
**How It Works**:
1. `useSession()` hook calls `/api/auth/session`
2. Server decrypts JWT and returns session
3. Client receives session object
4. Automatically refetches when token refreshes
**Location**: Used in:
- `app/signin/page.tsx`
- `components/auth/auth-check.tsx`
- Any client component needing authentication
---
## 7. Keycloak Admin Client
### 7.1 Purpose
**Location**: `lib/keycloak.ts`
The Keycloak Admin Client is used for **server-side user management**, not for user authentication. It's a separate administrative interface.
### 7.2 Authentication Methods
**Two Methods Supported**:
1. **Client Credentials** (Preferred):
```typescript
grant_type: 'client_credentials'
client_id: KEYCLOAK_CLIENT_ID
client_secret: KEYCLOAK_CLIENT_SECRET
```
2. **Password Grant** (Fallback):
```typescript
grant_type: 'password'
client_id: KEYCLOAK_CLIENT_ID
username: KEYCLOAK_ADMIN_USERNAME
password: KEYCLOAK_ADMIN_PASSWORD
```
### 7.3 Caching
**Token Caching**: 5 minutes
- Validates cached token before reuse
- Creates new client if token invalid/expired
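A sketch of this caching pattern, assuming the `@keycloak/admin-client` package and the client-credentials grant (the repository's `lib/keycloak.ts` may differ in details such as the fallback to the password grant):
```typescript
// Cached Keycloak admin client (sketch, not the repository's exact implementation).
import KcAdminClient from "@keycloak/admin-client";

const TOKEN_TTL_MS = 5 * 60 * 1000; // 5 minutes, as documented above

let cachedClient: KcAdminClient | null = null;
let cachedAt = 0;

export async function getKeycloakAdminClient(): Promise<KcAdminClient> {
  // Reuse the client while the cached token is still considered fresh
  if (cachedClient && Date.now() - cachedAt < TOKEN_TTL_MS) {
    return cachedClient;
  }

  const client = new KcAdminClient({
    baseUrl: process.env.KEYCLOAK_BASE_URL!,
    realmName: process.env.KEYCLOAK_REALM!,
  });

  // Preferred method: client credentials grant
  await client.auth({
    grantType: "client_credentials",
    clientId: process.env.KEYCLOAK_CLIENT_ID!,
    clientSecret: process.env.KEYCLOAK_CLIENT_SECRET,
  });

  cachedClient = client;
  cachedAt = Date.now();
  return client;
}
```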
### 7.4 Functions
- `getKeycloakAdminClient()` - Get authenticated admin client
- `getUserById(userId)` - Get user by Keycloak ID
- `getUserByEmail(email)` - Get user by email
- `getAllRoles()` - Get all realm roles
- `getUserRoles(userId)` - Get user's role mappings
---
## 8. Security Considerations
### 8.1 Cookie Security
**NextAuth Cookie Configuration** (implicit):
- **HttpOnly**: Yes (prevents XSS access)
- **Secure**: Yes (if `NEXTAUTH_URL` starts with `https://`)
- **SameSite**: Lax (default)
- **Path**: `/`
- **Domain**: Dashboard domain
**Keycloak Cookie Configuration** (Keycloak-controlled):
- Set by Keycloak server
- Typically `SameSite=Lax` or `SameSite=None` (for cross-site)
- Domain: Keycloak domain or configured domain
### 8.2 Token Storage
- **Access Token**: Stored in encrypted JWT (server-side only accessible)
- **Refresh Token**: Stored in encrypted JWT
- **Session Token**: Encrypted JWT in HTTP-only cookie
**Client-Side Access**:
- `session.accessToken` is exposed to client via `useSession()`
- This is the Keycloak OAuth access token
- Can be used for API calls to Keycloak-protected resources
### 8.3 CORS & CSP
**Content Security Policy** (`next.config.mjs`):
```typescript
'Content-Security-Policy': "frame-ancestors 'self' https://espace.slm-lab.net https://connect.slm-lab.net"
```
**Allows framing from**:
- Same origin (`'self'`)
- `https://espace.slm-lab.net`
- `https://connect.slm-lab.net`
### 8.4 Role-Based Access Control
**Role Extraction**:
- Roles come from Keycloak `realm_access.roles`
- Cleaned: `ROLE_` prefix removed, lowercased
- Stored in session: `session.user.role[]`
**Usage**: Roles are available in the session, but no RBAC enforcement was found in the audited code; applications should implement their own role checks. A minimal check helper is sketched below.
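A minimal server-side check, assuming the session shape described above (`session.user.role` as a lowercased string array); the helper name is illustrative:
```typescript
// Hypothetical role guard built on the audited session shape.
import { getServerSession } from "next-auth/next";
import { authOptions } from "@/app/api/auth/options";

export async function requireRole(...allowed: string[]) {
  const session = await getServerSession(authOptions);
  // With the repo's types/next-auth.d.ts augmentation this cast is unnecessary.
  const roles: string[] =
    (session?.user as unknown as { role?: string[] } | undefined)?.role ?? [];
  const ok = allowed.some((r) => roles.includes(r.toLowerCase()));
  return { session, ok };
}

// Usage in a server component or route handler:
//   const { ok } = await requireRole("admin");
//   if (!ok) redirect("/signin");
```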
---
## 9. Environment Variables
### 9.1 Required for Authentication
```bash
# Keycloak OAuth Configuration
KEYCLOAK_CLIENT_ID=neah-dashboard
KEYCLOAK_CLIENT_SECRET=<secret>
KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
KEYCLOAK_REALM=neah
# NextAuth Configuration
NEXTAUTH_URL=https://dashboard.example.com
NEXTAUTH_SECRET=<random-secret>
# Keycloak Admin (optional, for user management)
KEYCLOAK_ADMIN_USERNAME=admin
KEYCLOAK_ADMIN_PASSWORD=<password>
KEYCLOAK_BASE_URL=https://keycloak.example.com
```
### 9.2 Iframe Application URLs
All iframe applications require `NEXT_PUBLIC_IFRAME_*` environment variables:
- `NEXT_PUBLIC_IFRAME_PAROLE_URL`
- `NEXT_PUBLIC_IFRAME_AGILITY_URL`
- `NEXT_PUBLIC_IFRAME_AI_ASSISTANT_URL`
- `NEXT_PUBLIC_IFRAME_CONFERENCE_URL`
- ... (see section 4.2 for complete list)
---
## 10. Potential Issues & Recommendations
### 10.1 Token Expiration Bug
**Location**: `app/api/auth/options.ts:176`
```typescript
if (Date.now() < (token.accessTokenExpires as number) * 1000) {
```
**Issue**: The two code paths that set `accessTokenExpires` use different units. The initial JWT callback stores `account.expires_at`, which is in seconds since epoch, so the `* 1000` is needed there to compare against `Date.now()` (milliseconds). But `refreshAccessToken()` (section 2.2.5) stores `Date.now() + expires_in * 1000`, which is already in milliseconds; multiplying that by 1000 again produces a timestamp far in the future, so once a token has been refreshed it is never considered expired again.
**Recommendation**: Store `accessTokenExpires` in a single unit (milliseconds is the natural choice) in both the initial JWT callback and `refreshAccessToken()`, and drop the ad-hoc `* 1000` from the comparison.
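A sketch of a unit-consistent fix, storing `accessTokenExpires` in milliseconds in both code paths (a suggestion, not the current implementation):
```typescript
// Initial sign-in (jwt callback): account.expires_at is in seconds since epoch.
//   token.accessTokenExpires = (account.expires_at ?? 0) * 1000;
// refreshAccessToken(): expires_in is a lifetime in seconds.
//   token.accessTokenExpires = Date.now() + refreshed.expires_in * 1000;

// With both paths storing milliseconds, the validity check needs no extra factor:
const REFRESH_MARGIN_MS = 30 * 1000; // refresh slightly early to absorb clock skew

export function isAccessTokenValid(accessTokenExpires?: number): boolean {
  if (!accessTokenExpires) return false;
  return Date.now() < accessTokenExpires - REFRESH_MARGIN_MS;
}
```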
### 10.2 Cookie SameSite for Cross-Domain Iframes
**Issue**: If iframe applications are on different domains, Keycloak cookies may not be sent due to `SameSite` restrictions.
**Recommendation**:
- Configure Keycloak cookies with `SameSite=None; Secure`
- Ensure all domains use HTTPS
- Consider using a shared parent domain for cookies
### 10.3 Access Token Exposure
**Issue**: `session.accessToken` (Keycloak OAuth token) is exposed client-side.
**Recommendation**:
- Only expose if needed for client-side API calls
- Consider using proxy endpoints instead
- Implement token rotation if exposed
### 10.4 No Explicit Cookie Configuration
**Issue**: NextAuth cookie settings are implicit (defaults).
**Recommendation**: Explicitly configure cookies in `authOptions`:
```typescript
cookies: {
sessionToken: {
name: `next-auth.session-token`,
options: {
httpOnly: true,
sameSite: 'lax',
path: '/',
secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
}
}
}
```
### 10.5 Storage Initialization
**Issue**: Storage initialization happens client-side after authentication, which may cause race conditions.
**Recommendation**: Move storage initialization to server-side or use a more robust initialization pattern.
### 10.6 Service Token Invalidation Not Called
**Issue**: `invalidateServiceTokens()` exists but may not be called during sign-out.
**Recommendation**: Integrate service token invalidation into the sign-out flow.
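A hedged sketch of wiring this into NextAuth's `events.signOut` hook in `authOptions`; the import path and the signature of `invalidateServiceTokens` are assumptions based on this audit:
```typescript
import { invalidateServiceTokens } from "@/lib/session"; // assumed location and signature

// Sketch: events block to merge into authOptions in app/api/auth/options.ts
export const events = {
  async signOut({ token }: { token: { sub?: string } }) {
    if (token?.sub) {
      // Best-effort cleanup; never block the sign-out flow on failure.
      await invalidateServiceTokens(token.sub).catch(() => undefined);
    }
  },
};
```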
---
## 11. Flow Diagrams
### 11.1 Complete Authentication Flow
```
User → /signin
→ signIn("keycloak")
→ /api/auth/signin/keycloak
→ Keycloak Authorization Endpoint
→ User Login (Keycloak)
→ Keycloak Token Endpoint
→ /api/auth/callback/keycloak
→ JWT Callback (store tokens)
→ Session Callback (build session)
→ Set Cookies
→ Redirect to /
→ Storage Init
→ Dashboard Loaded
```
### 11.2 Iframe SSO Flow
```
Dashboard (authenticated)
→ User clicks iframe app link
→ Server checks session (getServerSession)
→ If authenticated: Load iframe
→ Browser sends cookies to iframe domain
→ Iframe app reads Keycloak cookies
→ Iframe app validates session
→ Iframe app loads authenticated
```
### 11.3 Token Refresh Flow
```
Request with expired token
→ getServerSession()
→ Decrypt JWT
→ Check expiration
→ If expired: JWT Callback
→ refreshAccessToken()
→ POST to Keycloak /token
→ Get new tokens
→ Update JWT
→ Return session
```
---
## 12. File Reference Map
### Core Authentication Files
| File | Purpose |
|------|---------|
| `app/api/auth/[...nextauth]/route.ts` | NextAuth route handler |
| `app/api/auth/options.ts` | **Main auth configuration** |
| `app/signin/page.tsx` | Sign-in page |
| `app/signout/page.tsx` | Sign-out page |
| `components/auth/signout-handler.tsx` | Sign-out logic |
| `components/auth/auth-check.tsx` | Client-side auth guard |
| `lib/keycloak.ts` | Keycloak admin client |
| `lib/session.ts` | Session utilities |
| `types/next-auth.d.ts` | TypeScript definitions |
### Iframe Application Files
All in `app/*/page.tsx`:
- `app/parole/page.tsx`
- `app/agilite/page.tsx`
- `app/alma/page.tsx`
- `app/vision/page.tsx`
- ... (see section 4.2)
### Supporting Files
| File | Purpose |
|------|---------|
| `app/components/responsive-iframe.tsx` | Iframe component |
| `app/layout.tsx` | Root layout (session check) |
| `components/providers.tsx` | SessionProvider wrapper |
| `components/layout/layout-wrapper.tsx` | Layout wrapper with auth |
---
## 13. Testing Checklist
### Authentication Flow
- [ ] Sign-in redirects to Keycloak
- [ ] Keycloak login works
- [ ] Callback receives tokens
- [ ] Session is created
- [ ] Cookies are set
- [ ] User redirected to dashboard
- [ ] Storage initializes
### Session Management
- [ ] Session persists across page reloads
- [ ] Token refresh works when expired
- [ ] Session expires after 30 days
- [ ] Invalid tokens are rejected
### Sign-Out
- [ ] Sign-out clears NextAuth cookies
- [ ] User redirected to sign-in
- [ ] Session invalidated
### Iframe SSO
- [ ] Iframe apps receive Keycloak cookies
- [ ] Iframe apps authenticate automatically
- [ ] Cross-domain cookies work (if applicable)
- [ ] Unauthenticated users redirected
### Security
- [ ] HttpOnly cookies enforced
- [ ] Secure cookies on HTTPS
- [ ] CSRF protection active
- [ ] Token encryption working
---
## 14. Conclusion
The authentication architecture uses a standard NextAuth + Keycloak OAuth 2.0 flow with JWT-based sessions. The system supports SSO for iframe applications via cookie sharing, assuming proper domain configuration.
**Key Strengths**:
- Standard OAuth 2.0/OpenID Connect implementation
- Stateless JWT sessions (scalable)
- Automatic token refresh
- Role-based user information
**Areas for Improvement**:
- Explicit cookie configuration
- Token expiration bug fix
- Service token invalidation integration
- Cross-domain cookie configuration verification
- Storage initialization robustness
---
**Document Version**: 1.0
**Last Updated**: 2024
**Audited By**: AI Assistant
**Next Review**: After implementing recommendations

View File

@ -1,163 +0,0 @@
# Nginx CapRover Fix - "upstream sent too big header" Error
## 🔍 Problem
502 error with the following Nginx message:
```
upstream sent too big header while reading response header from upstream
```
**Cause**: The NextAuth session cookie (a JWT carrying Keycloak tokens) exceeds 4KB, Nginx's default header limit.
## ✅ Solution: Modify the CapRover Configuration
### Option 1: Via the CapRover Dashboard (RECOMMENDED)
1. **Open the CapRover Dashboard**
2. **Select your app** (hub.slm-lab.net)
3. **Go to "HTTP Settings"**
4. **Click "Edit Nginx Configuration"** (if available)
5. **OR go to "App Configs" → "nginx"**
### Option 2: Edit the Nginx Template Directly
If you have access to the CapRover server, edit the template in:
- `/captain/templates/nginx.conf` (main template)
- OR create an override in your app
## 📝 Configuration to Add
**In the `location /` block**, add these directives **BEFORE** `proxy_pass`:
```nginx
location / {
# ============================================
# FIX: increase header limits for NextAuth
# ============================================
proxy_buffer_size 16k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 32k;
large_client_header_buffers 4 32k;
# Timeouts (avoid upstream timeouts)
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
# Existing proxy configuration
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support (if enabled)
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
```
## 🔧 Full Modified Configuration
Here is the complete `location /` block with the fixes:
```nginx
location / {
# FIX: oversized headers for NextAuth
proxy_buffer_size 16k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 32k;
large_client_header_buffers 4 32k;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
# Proxy configuration
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket (if enabled)
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
```
## 🎯 Method via the CapRover Dashboard
### If CapRover allows Nginx editing:
1. **CapRover Dashboard** → your app
2. **"App Configs"** → **"nginx"**
3. **Add to "Custom Nginx Configuration"**:
```nginx
location / {
proxy_buffer_size 16k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 32k;
large_client_header_buffers 4 32k;
}
```
4. **Save** → CapRover regenerates the configuration
### If CapRover does not allow editing:
**Option A**: Edit the CapRover template (advanced)
- Access the CapRover server
- Edit `/captain/templates/nginx.conf`
- Restart CapRover
**Option B**: Create a custom configuration file
- Create a file in your app
- Include it in the Nginx configuration
## 🔄 After the Change
1. **Check the Nginx configuration**:
```bash
sudo nginx -t
```
2. **Reload Nginx**:
```bash
sudo systemctl reload nginx
# OR, if CapRover manages Nginx
docker exec captain-nginx nginx -s reload
```
3. **Test the login**:
- Sign in via Keycloak
- Verify that the 502 error no longer occurs
## 📊 Explanation
**Before**:
- Nginx default header limit: 4KB
- NextAuth cookie: ~4-7KB (JWT carrying Keycloak tokens)
- Result: ❌ 502 error
**After**:
- Increased header limit: 32KB
- NextAuth cookie: ~4-7KB
- Result: ✅ Works
## ⚠️ Important Note
If you edit the CapRover template directly, **your changes will be overwritten** when CapRover is updated.
**Recommendation**: Use CapRover's "Custom Nginx Configuration" method if available, or document your changes so they can be reapplied after an update.
---
**Document created on**: $(date)
**Priority**: HIGH - resolves the 502 error

View File

@ -1,199 +0,0 @@
# Changelog - Login Flow Improvements
## Date: $(date)
### ✅ Changes Made
#### 1. **Removed the default `prompt=login`**
**File**: `app/api/auth/options.ts`
- **Before**: `prompt: "login"` always active → prevented natural SSO
- **After**: `prompt` removed → SSO works naturally for legitimate users
- **Impact**: ✅ Better UX - SSO works for legitimate users
#### 2. **Created the `/api/auth/mark-logout` API route**
**New file**: `app/api/auth/mark-logout/route.ts`
- Creates an HttpOnly cookie `force_login_prompt=true` (5 minutes)
- Used to mark that a logout has taken place
- **Impact**: ✅ Robust mechanism to detect logout server-side
#### 3. **Updated signout-handler.tsx**
**File**: `components/auth/signout-handler.tsx`
- Added a call to `/api/auth/mark-logout` before logout
- **Impact**: ✅ Server-side flag created to force login after logout
#### 4. **Updated main-nav.tsx**
**File**: `components/main-nav.tsx`
- Added a call to `/api/auth/mark-logout` in the logout handler
- **Impact**: ✅ Logging out from the navigation correctly marks the logout
#### 5. **Updated layout-wrapper.tsx**
**File**: `components/layout/layout-wrapper.tsx`
- Added a call to `/api/auth/mark-logout` when logging out from an iframe
- **Impact**: ✅ Logging out from an iframe correctly marks the logout
#### 6. **Simplified signin/page.tsx**
**File**: `app/signin/page.tsx`
- **Before**: Complex logic with multiple checks (cookies, sessionStorage, URL params)
- **After**: Simplified logic based on the server cookie `force_login_prompt`
- Removed the complex invalid-session detection
- **Impact**: ✅ Simpler, more maintainable code with fewer race conditions
#### 7. **Explicit NextAuth cookie configuration**
**File**: `app/api/auth/options.ts`
- Added explicit configuration for:
  - `sessionToken`
  - `callbackUrl`
  - `csrfToken`
  - `state`
- **Impact**: ✅ Better cookie control and security
#### 8. **Improved refresh-token error handling**
**File**: `app/api/auth/options.ts`
- Improved detection of the different error types:
  - `SessionNotActive` (session invalidated)
  - `RefreshTokenExpired` (token expired)
- Tokens are explicitly removed on errors
- Previous errors are cleared on a successful refresh
- **Impact**: ✅ Better detection and handling of invalid sessions
---
## 🔄 Behavior Before/After
### Before the changes
**Login on first visit**:
- ❌ Credentials always requested (even when an SSO session exists)
- ❌ Poor UX
**Login after logout**:
- ⚠️ May auto-login if a Keycloak SSO session exists
- ⚠️ Complex detection logic
**Logout**:
- ✅ Works, but may leave the SSO session active
**Code**:
- ❌ Complex logic in signin/page.tsx
- ❌ Possible race conditions
### After the changes
**Login on first visit**:
- ✅ SSO works naturally (no prompt if a session exists)
- ✅ Better UX
**Login after logout**:
- ✅ SSO session terminated via `end-sso-session` (Admin API)
- ✅ The `force_login_prompt` cookie marks the logout
- ⚠️ Note: `prompt=login` is not yet added dynamically (NextAuth limitation)
- ✅ But the SSO session is terminated, so credentials will be requested
**Logout**:
- ✅ Calls `mark-logout` to create the server cookie
- ✅ Terminates the SSO session via the Admin API
- ✅ Removes the NextAuth cookies
- ✅ Redirects to the Keycloak logout
**Code**:
- ✅ Simplified logic in signin/page.tsx
- ✅ Fewer race conditions
- ✅ Explicit cookie configuration
---
## 📝 Important Notes
### Current Limitation
**Dynamic `prompt=login`**:
- NextAuth v4 does not make it easy to add `prompt=login` dynamically
- Current solution: terminate the SSO session via the Admin API (`end-sso-session`)
- **Impact**: If the SSO session is properly terminated, Keycloak will ask for credentials anyway
- **Possible future improvement**: a Next.js middleware that intercepts and modifies the Keycloak URL
### Workaround
Instead of adding `prompt=login` dynamically, we:
1. Terminate the Keycloak SSO session via the Admin API (`end-sso-session`)
2. Create a server cookie (`force_login_prompt`) for tracking
3. Let Keycloak handle it naturally (with no SSO session, it will ask for credentials)
---
## 🧪 Tests to Run
### Test 1: Login on first visit
1. Open a browser in private browsing mode
2. Go to `/signin`
3. **Expected**: Redirect to Keycloak; SSO works if a session exists
4. **Result**: ✅ SSO works (no forced prompt)
### Test 2: Login after logout
1. Sign in
2. Sign out
3. Click "Se connecter"
4. **Expected**: Keycloak asks for credentials (SSO session terminated)
5. **Result**: ✅ Credentials requested (SSO session terminated)
### Test 3: Logout from the dashboard
1. Sign in
2. Click "Déconnexion" in the navigation
3. **Expected**: Redirect to `/signin?logout=true`
4. **Result**: ✅ Logout works
### Test 4: Logout from an iframe
1. Sign in
2. Open an application in an iframe
3. Sign out from the iframe
4. **Expected**: The dashboard signs out as well
5. **Result**: ✅ Logout is synchronized
### Test 5: Session expiration
1. Sign in
2. Wait for expiration (or invalidate the Keycloak session)
3. **Expected**: Redirect to `/signin` with an appropriate message
4. **Result**: ✅ Detection and redirect work
---
## 🔧 Possible Next Improvements
### Option 1: Middleware to add `prompt=login`
Create a Next.js middleware that intercepts `/api/auth/signin/keycloak` and modifies the Keycloak URL to add `prompt=login` when the `force_login_prompt` cookie exists.
### Option 2: Custom signin route
Create a custom route that fully handles the OAuth flow with `prompt=login` and uses NextAuth to validate the callback.
### Option 3: Change the Keycloak configuration
Configure Keycloak to always ask for credentials after logout (Keycloak-side configuration).
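In addition to the options above, next-auth v4's `signIn()` accepts a third `authorizationParams` argument that is appended to the authorization URL. A hedged client-side sketch, assuming the installed version supports it, reusing the `force_login_prompt` cookie described in this changelog:
```typescript
"use client";
import { signIn } from "next-auth/react";

// Sketch: only force the Keycloak login prompt when the logout marker cookie is present.
export function loginWithOptionalPrompt() {
  const forceLogin = document.cookie
    .split(";")
    .some(c => c.trim().startsWith("force_login_prompt="));

  return signIn(
    "keycloak",
    { callbackUrl: "/" },
    forceLogin ? { prompt: "login" } : undefined
  );
}
```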
---
## 📊 Modified Files
1. ✅ `app/api/auth/options.ts` - NextAuth configuration
2. ✅ `app/api/auth/mark-logout/route.ts` - New file
3. ✅ `components/auth/signout-handler.tsx` - Logout handler
4. ✅ `components/main-nav.tsx` - Navigation logout
5. ✅ `components/layout/layout-wrapper.tsx` - Iframe logout in the layout
6. ✅ `app/signin/page.tsx` - Simplified signin page
---
## ✅ Summary
**8 major changes made**:
- ✅ Natural SSO works
- ✅ Logout marked server-side
- ✅ Simplified code
- ✅ Better error handling
- ✅ Explicit cookie configuration
- ⚠️ Dynamic `prompt=login` not implemented (NextAuth limitation, worked around via `end-sso-session`)
**Overall impact**: ✅ Improved login flow, more maintainable code, better UX
---
**Document created on**: $(date)

View File

@ -1,789 +0,0 @@
# Comprehensive Notification System Analysis & Improvement Recommendations
**Date**: 2026-01-06
**Purpose**: Complete step-by-step trace of notification system with improvement recommendations
---
## 📋 **Table of Contents**
1. [Architecture Overview](#architecture-overview)
2. [Complete Flow Traces](#complete-flow-traces)
3. [Current Issues Identified](#current-issues-identified)
4. [Improvement Recommendations](#improvement-recommendations)
5. [Performance Optimizations](#performance-optimizations)
6. [Reliability Improvements](#reliability-improvements)
7. [User Experience Enhancements](#user-experience-enhancements)
---
## 🏗️ **Architecture Overview**
### **Components**:
```
┌─────────────────────────────────────────────────────────────┐
│ UI Layer (React) │
│ ┌─────────────────────────────────────────────────────┐ │
│ │ NotificationBadge Component │ │
│ │ - Displays notification count badge │ │
│ │ - Dropdown with notification list │ │
│ │ - Mark as read / Mark all as read buttons │ │
│ └─────────────────────────────────────────────────────┘ │
│ ↓ │
│ ┌─────────────────────────────────────────────────────┐ │
│ │ useNotifications Hook │ │
│ │ - State management (notifications, count, loading) │ │
│ │ - Polling (60s interval) │ │
│ │ - Optimistic updates │ │
│ │ - Rate limiting (5s minimum between fetches) │ │
│ └─────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ API Routes (Next.js) │
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
│ │ GET /count │ │ GET /list │ │ POST /read │ │
│ │ │ │ │ │ POST /read-all│ │
│ └──────────────┘ └──────────────┘ └──────────────┘ │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Service Layer (NotificationService) │
│ - Singleton pattern │
│ - Adapter pattern (LeantimeAdapter, future adapters) │
│ - Redis caching (count: 30s, list: 5min) │
│ - Cache invalidation │
│ - Background refresh scheduling │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Adapter Layer (LeantimeAdapter) │
│ - User ID caching (1 hour TTL) │
│ - Retry logic (3 attempts, exponential backoff) │
│ - Direct API calls to Leantime │
│ - Notification transformation │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ External API (Leantime) │
│ - JSON-RPC API │
│ - getAllNotifications, markNotificationRead, etc. │
└─────────────────────────────────────────────────────────────┘
```
---
## 🔄 **Complete Flow Traces**
### **Flow 1: Initial Page Load & Count Display**
#### **Step-by-Step**:
1. **Component Mount** (`notification-badge.tsx`)
```
- Component renders
- useNotifications() hook initializes
- useEffect triggers when status === 'authenticated'
```
2. **Hook Initialization** (`use-notifications.ts`)
```
- Sets isMountedRef.current = true
- Calls fetchNotificationCount(true) - force refresh
- Calls fetchNotifications(1, 20)
- Starts polling: setInterval every 60 seconds
```
3. **Count Fetch** (`use-notifications.ts` → `/api/notifications/count`)
```
- Checks: session exists, isMounted, rate limit (5s)
- Makes GET request: /api/notifications/count?_t=${Date.now()}
- Cache-busting parameter added
```
4. **API Route** (`app/api/notifications/count/route.ts`)
```
- Authenticates user via getServerSession()
- Gets userId from session
- Calls NotificationService.getNotificationCount(userId)
```
5. **Service Layer** (`notification-service.ts`)
```
- Checks Redis cache: notifications:count:${userId}
- If cached: Returns cached data (30s TTL)
- If not cached: Fetches from adapters
```
6. **Adapter Layer** (`leantime-adapter.ts`)
```
- getNotificationCount() called
- Gets user email from session
- Gets Leantime user ID (checks cache first, then API with retry)
- Fetches up to 1000 notifications directly from API
- Counts unread: filter(n => n.read === 0)
- Returns count object
```
7. **Cache Storage** (`notification-service.ts`)
```
- Stores count in Redis: notifications:count:${userId}
- TTL: 30 seconds
- Returns to API route
```
8. **Response** (`app/api/notifications/count/route.ts`)
```
- Returns JSON with count
- Sets Cache-Control: private, max-age=10
```
9. **Hook Update** (`use-notifications.ts`)
```
- Receives count data
- Updates state: setNotificationCount(data)
```
10. **UI Update** (`notification-badge.tsx`)
```
- Badge displays notificationCount.unread
- Shows "60" if 60 unread notifications
```
---
### **Flow 2: Mark All Notifications as Read**
#### **Step-by-Step**:
1. **User Action** (`notification-badge.tsx`)
```
- User clicks "Mark all read" button
- Calls handleMarkAllAsRead()
- Calls markAllAsRead() from hook
```
2. **Optimistic Update** (`use-notifications.ts`)
```
- Immediately updates state:
* All notifications: isRead = true
* Count: unread = 0
- Provides instant UI feedback
```
3. **API Call** (`use-notifications.ts`)
```
- Makes POST to /api/notifications/read-all
- Waits for response
```
4. **API Route** (`app/api/notifications/read-all/route.ts`)
```
- Authenticates user
- Calls NotificationService.markAllAsRead(userId)
- Logs duration
```
5. **Service Layer** (`notification-service.ts`)
```
- Loops through all adapters
- For each adapter:
* Checks if configured
* Calls adapter.markAllAsRead(userId)
- Collects results
- Always invalidates cache (even on failure)
```
6. **Adapter Layer** (`leantime-adapter.ts`)
```
- Gets user email from session
- Gets Leantime user ID (cached or fetched with retry)
- Fetches all notifications from API (up to 1000)
- Filters unread: filter(n => n.read === 0)
- Marks each individually using Promise.all()
- Returns success if any were marked
```
7. **Cache Invalidation** (`notification-service.ts`)
```
- Deletes count cache: notifications:count:${userId}
- Deletes all list caches: notifications:list:${userId}:*
- Uses SCAN to avoid blocking Redis
```
8. **Count Refresh** (`use-notifications.ts`)
```
- After 200ms delay, calls fetchNotificationCount(true)
- Fetches fresh count from API
- Updates state with new count
```
---
### **Flow 3: Polling for Updates**
#### **Step-by-Step**:
1. **Polling Setup** (`use-notifications.ts`)
```
- setInterval created: 60 seconds
- Calls debouncedFetchCount() on each interval
```
2. **Debounced Fetch** (`use-notifications.ts`)
```
- Debounce delay: 300ms
- Prevents rapid successive calls
- Calls fetchNotificationCount(false)
```
3. **Rate Limiting** (`use-notifications.ts`)
```
- Checks: now - lastFetchTime < 5 seconds
- If too soon, skips fetch
```
4. **Count Fetch** (same as Flow 1, steps 3-10)
```
- Fetches from API
- Updates count if changed
```
---
## 🐛 **Current Issues Identified**
### **Issue #1: Multiple Fetching Mechanisms**
**Problem**:
- `useNotifications` has its own polling (60s)
- `NotificationService` has background refresh
- `NotificationBadge` has manual fetch on open
- No coordination between them
**Impact**:
- Redundant API calls
- Inconsistent refresh timing
- Potential race conditions
---
### **Issue #2: Mark All As Read - Sequential Processing**
**Problem**:
- Marks all notifications in parallel using `Promise.all()`
- No batching or rate limiting
- Can overwhelm Leantime API
- Connection resets on large batches (60+ notifications)
**Impact**:
- Partial failures (some marked, some not)
- Network timeouts
- Poor user experience
---
### **Issue #3: Cache TTL Mismatch**
**Problem**:
- Count cache: 30 seconds
- List cache: 5 minutes
- Client cache: 10 seconds (count), 30 seconds (list)
- Background refresh: 1 minute cooldown
**Impact**:
- Stale data inconsistencies
- Count and list can be out of sync
- Confusing UX
---
### **Issue #4: No Progress Feedback**
**Problem**:
- Mark all as read shows no progress
- User doesn't know how many are being marked
- No indication if operation is still running
**Impact**:
- Poor UX
- User might click multiple times
- No way to cancel operation
---
### **Issue #5: Optimistic Updates Can Be Wrong**
**Problem**:
- Hook optimistically sets count to 0
- But operation might fail or be partial
- Count refresh after 200ms might show different value
- Count jumps: 60 → 0 → 40 (confusing)
**Impact**:
- Confusing UX
- User thinks operation failed when it partially succeeded
---
### **Issue #6: No Retry for Mark All As Read**
**Problem**:
- If connection resets during marking, operation fails
- No automatic retry for failed notifications
- User must manually retry
**Impact**:
- Partial success requires manual intervention
- Poor reliability
---
### **Issue #7: Session Lookup on Every Call**
**Problem**:
- `getUserEmail()` calls `getServerSession()` every time
- `getLeantimeUserId()` is cached, but email lookup is not
- Multiple session lookups per request
**Impact**:
- Performance overhead
- Potential session inconsistencies
---
### **Issue #8: No Connection Pooling**
**Problem**:
- Each API call creates new fetch request
- No connection reuse
- No request queuing
**Impact**:
- Slower performance
- Higher connection overhead
- Potential connection exhaustion
---
### **Issue #9: Background Refresh Uses setTimeout**
**Problem**:
- `scheduleBackgroundRefresh()` uses `setTimeout(0)`
- Not reliable in serverless environments
- Can be lost if server restarts
**Impact**:
- Background refresh might not happen
- Cache might become stale
---
### **Issue #10: No Unified Refresh Integration**
**Problem**:
- `useNotifications` has its own polling
- `RefreshManager` exists but not used
- `useUnifiedRefresh` hook exists but not integrated
**Impact**:
- Duplicate refresh logic
- Inconsistent refresh intervals
- Not using centralized refresh system
---
## 💡 **Improvement Recommendations**
### **Priority 1: Integrate Unified Refresh System**
**Current State**:
- `useNotifications` has custom polling (60s)
- `RefreshManager` exists but not used
- `useUnifiedRefresh` hook exists but not integrated
**Recommendation**:
- Replace custom polling with `useUnifiedRefresh`
- Use `REFRESH_INTERVALS.NOTIFICATIONS_COUNT` (30s)
- Remove duplicate polling logic
- Centralize all refresh management
**Benefits**:
- ✅ Consistent refresh intervals
- ✅ Reduced code duplication
- ✅ Better coordination with other widgets
- ✅ Easier to manage globally
---
### **Priority 2: Batch Mark All As Read**
**Current State**:
- Marks all notifications in parallel
- No batching or rate limiting
- Can overwhelm API
**Recommendation**:
- Process in batches of 10-20 notifications
- Add delay between batches (100-200ms)
- Show progress indicator
- Retry failed batches automatically
**Implementation**:
```typescript
// Sketch: process unread notifications in small batches, pausing between batches
async markAllAsRead(userId: string): Promise<boolean> {
  const BATCH_SIZE = 10;
  const BATCH_DELAY_MS = 200;
  // Assumed helper: fetch all notifications and keep those with read === 0
  const unread = await this.getUnreadNotifications(userId);
  const delay = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));

  for (let i = 0; i < unread.length; i += BATCH_SIZE) {
    const batch = unread.slice(i, i + BATCH_SIZE);
    await Promise.all(batch.map(n => this.markAsRead(n.id)));
    await delay(BATCH_DELAY_MS);
    // Update progress here (e.g. i + batch.length of unread.length)
  }
  return true;
}
```
**Benefits**:
- ✅ Prevents API overload
- ✅ Better error recovery
- ✅ Progress feedback
- ✅ More reliable
---
### **Priority 3: Fix Cache TTL Consistency**
**Current State**:
- Count cache: 30s
- List cache: 5min
- Client cache: 10s/30s
- Background refresh: 1min
**Recommendation**:
- Align all cache TTLs
- Count cache: 30s (matches refresh interval)
- List cache: 30s (same as count)
- Client cache: 0s (rely on server cache)
- Background refresh: 30s (matches TTL)
**Benefits**:
- ✅ Consistent data
- ✅ Count and list always in sync
- ✅ Predictable behavior
---
### **Priority 4: Add Progress Feedback**
**Current State**:
- No progress indication
- User doesn't know operation status
**Recommendation**:
- Show progress bar: "Marking X of Y..."
- Update in real-time as batches complete
- Show success/failure count
- Allow cancellation
**Benefits**:
- ✅ Better UX
- ✅ User knows what's happening
- ✅ Prevents multiple clicks
---
### **Priority 5: Improve Optimistic Updates**
**Current State**:
- Optimistically sets count to 0
- Might be wrong if operation fails
- Count jumps confusingly
**Recommendation**:
- Only show optimistic update if confident
- Show loading state instead of immediate 0
- Poll until count matches expected value
- Or: Show "Marking..." state instead of 0
**Benefits**:
- ✅ More accurate UI
- ✅ Less confusing
- ✅ Better error handling
---
### **Priority 6: Add Automatic Retry**
**Current State**:
- No retry for failed notifications
- User must manually retry
**Recommendation**:
- Track which notifications failed
- Automatically retry failed ones
- Exponential backoff
- Max 3 retries per notification
**Benefits**:
- ✅ Better reliability
- ✅ Automatic recovery
- ✅ Less manual intervention
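A hedged sketch of the retry approach described above, tracking which notification IDs still fail after backoff; names are illustrative:
```typescript
// Sketch: retry marking each notification, collecting the IDs that still fail.
async function markWithRetry(
  ids: string[],
  markOne: (id: string) => Promise<void>,
  maxAttempts = 3
): Promise<string[]> {
  const failed: string[] = [];
  for (const id of ids) {
    let done = false;
    for (let attempt = 1; attempt <= maxAttempts && !done; attempt++) {
      try {
        await markOne(id);
        done = true;
      } catch {
        // Exponential backoff: 500ms, 1s, 2s, ...
        await new Promise(r => setTimeout(r, 500 * Math.pow(2, attempt - 1)));
      }
    }
    if (!done) failed.push(id);
  }
  return failed; // candidates for a later pass or manual retry
}
```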
---
### **Priority 7: Cache User Email**
**Current State**:
- `getUserEmail()` calls session every time
- Not cached
**Recommendation**:
- Cache user email in Redis (same TTL as user ID)
- Invalidate on session change
- Reduce session lookups
**Benefits**:
- ✅ Better performance
- ✅ Fewer session calls
- ✅ More consistent
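A minimal sketch of that cache, assuming an ioredis-style client exported from `lib/redis.ts`; the key name and TTL are assumptions chosen to mirror the 1-hour user ID cache:
```typescript
import { redis } from "@/lib/redis"; // assumed export

const EMAIL_TTL_SECONDS = 3600; // align with the 1-hour Leantime user ID cache

// Sketch: resolve a user's email at most once per TTL instead of per request.
export async function getCachedUserEmail(
  userId: string,
  lookup: () => Promise<string | null> // e.g. a getServerSession()-based lookup
): Promise<string | null> {
  const key = `notifications:user-email:${userId}`;
  const cached = await redis.get(key);
  if (cached) return cached;

  const email = await lookup();
  if (email) await redis.set(key, email, "EX", EMAIL_TTL_SECONDS);
  return email;
}
```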
---
### **Priority 8: Add Connection Pooling**
**Current State**:
- Each API call creates new fetch
- No connection reuse
**Recommendation**:
- Use HTTP agent with connection pooling
- Reuse connections
- Queue requests if needed
**Benefits**:
- ✅ Better performance
- ✅ Lower overhead
- ✅ More reliable connections
---
### **Priority 9: Replace setTimeout with Proper Scheduling**
**Current State**:
- Background refresh uses `setTimeout(0)`
- Not reliable in serverless
**Recommendation**:
- Use proper job queue (Bull, Agenda, etc.)
- Or: Use Next.js API route for background jobs
- Or: Use cron job for scheduled refreshes
**Benefits**:
- ✅ More reliable
- ✅ Works in serverless
- ✅ Better error handling
---
### **Priority 10: Add Request Deduplication**
**Current State**:
- Multiple components can trigger same fetch
- No deduplication
**Recommendation**:
- Use `requestDeduplicator` utility (already exists)
- Deduplicate identical requests within short window
- Share results between callers
**Benefits**:
- ✅ Fewer API calls
- ✅ Better performance
- ✅ Reduced server load
---
## ⚡ **Performance Optimizations**
### **1. Reduce API Calls**
**Current**:
- Polling every 60s
- Background refresh every 1min
- Manual fetch on dropdown open
- Count refresh after marking
**Optimization**:
- Use unified refresh (30s)
- Deduplicate requests
- Share cache between components
- Reduce redundant fetches
**Expected Improvement**: 50-70% reduction in API calls
---
### **2. Optimize Mark All As Read**
**Current**:
- All notifications in parallel
- No batching
- Can timeout
**Optimization**:
- Batch processing (10-20 at a time)
- Delay between batches
- Progress tracking
- Automatic retry
**Expected Improvement**: 80-90% success rate (vs current 60-70%)
---
### **3. Improve Cache Strategy**
**Current**:
- Inconsistent TTLs
- Separate caches
- No coordination
**Optimization**:
- Unified TTLs
- Coordinated invalidation
- Cache versioning
- Smart refresh
**Expected Improvement**: 30-40% faster response times
---
## 🛡️ **Reliability Improvements**
### **1. Better Error Handling**
**Current**:
- Basic try/catch
- Returns false on error
- No retry logic
**Improvement**:
- Retry with exponential backoff
- Circuit breaker pattern
- Graceful degradation
- Better error messages
---
### **2. Connection Resilience**
**Current**:
- Fails on connection reset
- No recovery
**Improvement**:
- Automatic retry
- Connection pooling
- Health checks
- Fallback mechanisms
---
### **3. Partial Failure Handling**
**Current**:
- All-or-nothing approach
- No tracking of partial success
**Improvement**:
- Track which notifications succeeded
- Retry only failed ones
- Report partial success
- Allow resume
---
## 🎨 **User Experience Enhancements**
### **1. Progress Indicators**
- Show "Marking X of Y..." during mark all
- Progress bar
- Success/failure count
- Estimated time remaining
---
### **2. Better Loading States**
- Skeleton loaders
- Optimistic updates with loading overlay
- Smooth transitions
- No jarring count jumps
---
### **3. Error Messages**
- User-friendly error messages
- Actionable suggestions
- Retry buttons
- Help text
---
### **4. Real-time Updates**
- WebSocket/SSE for real-time updates
- Instant count updates
- No polling needed
- Better UX
---
## 📊 **Summary of Improvements**
### **High Priority** (Implement First):
1. ✅ Integrate unified refresh system
2. ✅ Batch mark all as read
3. ✅ Fix cache TTL consistency
4. ✅ Add progress feedback
### **Medium Priority**:
5. ✅ Improve optimistic updates
6. ✅ Add automatic retry
7. ✅ Cache user email
8. ✅ Add request deduplication
### **Low Priority** (Nice to Have):
9. ✅ Connection pooling
10. ✅ Replace setTimeout with proper scheduling
11. ✅ WebSocket/SSE for real-time updates
---
## 🎯 **Expected Results After Improvements**
### **Performance**:
- 50-70% reduction in API calls
- 30-40% faster response times
- 80-90% success rate for mark all
### **Reliability**:
- Automatic retry for failures
- Better error recovery
- More consistent behavior
### **User Experience**:
- Progress indicators
- Better loading states
- Clearer error messages
- Smoother interactions
---
**Status**: Analysis complete. Ready for implementation prioritization.

View File

@ -1,304 +0,0 @@
# Courrier User Management with Prisma
## Overview
**Important**: Courrier (the email system) does **NOT** create User records in Prisma. It only manages email account credentials (`MailCredentials`) for users that already exist in the database.
## User Creation Flow
### 1. User Creation in Keycloak (Primary Source)
Users are created in **Keycloak** first, which is the primary authentication system:
**Location**: `app/api/users/route.ts` (POST method)
**Process**:
1. User is created in Keycloak via Admin API
2. Roles are assigned to the user
3. User may be created in external systems:
- **Leantime** (project management tool)
- **Dolibarr** (if user has "Mediation" or "Expression" roles)
**Key Code**:
```typescript
// Create user in Keycloak
const createResponse = await fetch(
`${process.env.KEYCLOAK_BASE_URL}/admin/realms/${process.env.KEYCLOAK_REALM}/users`,
{
method: "POST",
headers: {
Authorization: `Bearer ${token}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
username: data.username,
enabled: true,
emailVerified: true,
firstName: data.firstName,
lastName: data.lastName,
email: data.email,
credentials: [{ type: "password", value: data.password, temporary: false }],
}),
}
);
```
### 2. User Sync to Prisma Database
After creation in Keycloak, users need to be synced to the Prisma database. This happens via:
**Option A: Manual Sync Script**
- `scripts/sync-users.ts` or `scripts/sync-users.js`
- Fetches users from Keycloak API
- Creates/updates User records in Prisma
**Option B: API Endpoint**
- `app/api/sync-users/route.ts` (GET method)
- Can be called to sync users programmatically
**Prisma User Creation**:
```typescript
await prisma.user.create({
data: {
id: user.id, // Use the Keycloak ID as primary ID
email: user.email,
password: tempPassword, // Temporary password (not used for auth)
createdAt: new Date(),
updatedAt: new Date(),
},
});
```
**Important Notes**:
- The Prisma User `id` field uses the **Keycloak user ID** (UUID)
- The `password` field in Prisma is not used for authentication (Keycloak handles that)
- Users must exist in Prisma before they can use Courrier
### 3. Prisma Schema
**User Model** (`prisma/schema.prisma`):
```prisma
model User {
id String @id @default(uuid())
email String @unique
password String
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
mailCredentials MailCredentials[] // One-to-many relationship
// ... other relations
}
```
**MailCredentials Model**:
```prisma
model MailCredentials {
id String @id @default(uuid())
userId String
email String
password String? // Optional (for OAuth accounts)
host String
port Int
secure Boolean @default(true)
use_oauth Boolean @default(false)
refresh_token String?
access_token String?
token_expiry DateTime?
smtp_host String?
smtp_port Int?
smtp_secure Boolean? @default(false)
display_name String?
color String? @default("#0082c9")
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
@@unique([userId, email]) // One email account per user
@@index([userId])
}
```
## Courrier's Role: Adding Email Accounts
### How Courrier Adds Email Accounts
**Location**: `app/api/courrier/account/route.ts` (POST method)
**Process**:
1. **Authentication Check**: Verifies user session exists
2. **User Existence Check**: Verifies user exists in Prisma database
3. **Connection Test**: Tests IMAP connection before saving
4. **Save Credentials**: Creates/updates `MailCredentials` record
**Key Code Flow**:
```typescript
// 1. Check if user exists in database
const userExistsInDB = await userExists(session.user.id);
if (!userExistsInDB) {
return NextResponse.json({
error: 'User not found in database',
details: `The user ID from your session (${session.user.id}) doesn't exist in the database.`
}, { status: 400 });
}
// 2. Test connection
const testResult = await testEmailConnection(credentials);
if (!testResult.imap) {
return NextResponse.json({
error: `Connection test failed: ${testResult.error}`
}, { status: 400 });
}
// 3. Save credentials
await saveUserEmailCredentials(session.user.id, email, credentials);
```
### Saving Email Credentials
**Location**: `lib/services/email-service.ts` → `saveUserEmailCredentials()`
**Process**:
1. Prepares database credentials object (excluding OAuth tokens)
2. Uses `upsert` to create or update `MailCredentials`
3. Caches full credentials (including OAuth tokens) in Redis
**Key Code**:
```typescript
// Save to database using upsert
await prisma.mailCredentials.upsert({
where: {
// Finds existing record by userId + email
userId_email: {
userId: userId,
email: credentials.email
}
},
update: dbCredentials,
create: {
userId,
...dbCredentials
}
});
// Cache full credentials (including OAuth) in Redis
await cacheEmailCredentials(userId, accountId, fullCreds);
```
**Important Notes**:
- OAuth tokens (access_token, refresh_token) are stored in **Redis only**, not in Prisma
- The Prisma `MailCredentials` table stores IMAP/SMTP settings
- The `password` field is optional (for OAuth accounts like Microsoft)
### Microsoft OAuth Flow
**Location**: `app/api/courrier/microsoft/callback/route.ts`
For Microsoft accounts, the flow is:
1. User authorizes via Microsoft OAuth
2. Access token and refresh token are obtained
3. Credentials are saved with `use_oauth: true`
4. OAuth tokens are cached in Redis (not in Prisma)
## Data Flow Diagram
```
┌─────────────┐
│ Keycloak │ ← Primary user creation
└──────┬──────┘
│ Sync
┌─────────────┐
│ Prisma │ ← User record created
│ User │
└──────┬──────┘
│ User adds email account
┌─────────────┐
│ Prisma │ ← MailCredentials created
│MailCredentials│
└──────┬──────┘
│ OAuth tokens (if applicable)
┌─────────────┐
│ Redis │ ← OAuth tokens cached
└─────────────┘
```
## Key Files Reference
### User Creation
- `app/api/users/route.ts` - Creates users in Keycloak
- `scripts/sync-users.ts` - Syncs users from Keycloak to Prisma
- `app/api/sync-users/route.ts` - API endpoint for syncing users
### Courrier Email Management
- `app/api/courrier/account/route.ts` - Add/update/delete email accounts
- `lib/services/email-service.ts` - Core email service functions
- `saveUserEmailCredentials()` - Saves email credentials to Prisma
- `getUserEmailCredentials()` - Retrieves credentials from Prisma
- `testEmailConnection()` - Tests IMAP/SMTP connection
### Database Schema
- `prisma/schema.prisma` - Prisma schema definitions
- `lib/prisma.ts` - Prisma client instance
### Authentication
- `app/api/auth/options.ts` - NextAuth configuration
- `lib/auth.ts` - Authentication helpers
## Auto-Creation of Users
**As of recent updates**, Courrier now automatically creates User records in Prisma if they don't exist when:
- Adding an email account (`/api/courrier/account` POST)
- Checking session status (`/api/courrier/session` GET)
This handles cases where:
- The database was reset/lost but users still exist in Keycloak
- Users were created in Keycloak but never synced to Prisma
The auto-creation uses session data from Keycloak to populate:
- `id`: Keycloak user ID (UUID)
- `email`: User's email from session
- `password`: Temporary random password (not used for auth, Keycloak handles authentication)
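A hedged sketch of that auto-creation step, assuming the Prisma client exported from `lib/prisma.ts` and a session shaped like the NextAuth session described above:
```typescript
import { randomBytes } from "crypto";
import { prisma } from "@/lib/prisma"; // assumed export name

// Sketch: ensure a Prisma User row exists for the authenticated Keycloak user.
export async function ensureUserExists(session: { user: { id: string; email: string } }) {
  return prisma.user.upsert({
    where: { id: session.user.id }, // Keycloak user ID (UUID)
    update: {},                     // nothing to change if the row already exists
    create: {
      id: session.user.id,
      email: session.user.email,
      // Placeholder only: authentication is handled by Keycloak, not this field.
      password: randomBytes(24).toString("hex"),
    },
  });
}
```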
## Common Issues & Solutions
### Issue: "User not found in database" when adding email account
**Cause**: User exists in Keycloak but not in Prisma database
**Solution**:
- **Automatic**: The system now auto-creates users when needed
- **Manual**: Run the sync script to create users in Prisma:
```bash
npm run sync-users
# or
node scripts/sync-users.js
```
### Issue: Email credentials not saving
**Check**:
1. User exists in Prisma: `prisma.user.findUnique({ where: { id: userId } })`
2. Connection test passes before saving
3. Unique constraint `[userId, email]` is not violated
### Issue: OAuth tokens not persisting
**Note**: OAuth tokens are stored in Redis, not Prisma. Check:
- Redis connection and TTL settings
- Redis cache functions in `lib/redis.ts`
## Summary
1. **Users are created in Keycloak first** (via `app/api/users/route.ts`)
2. **Users are synced to Prisma** (via sync scripts or API)
3. **Courrier adds email accounts** by creating `MailCredentials` records linked to existing Users
4. **OAuth tokens are cached in Redis**, not stored in Prisma
5. **Users must exist in Prisma** before they can add email accounts via Courrier
Courrier is a **credentials management system** for existing users, not a user creation system.

View File

@ -1,307 +0,0 @@
# Critical Fixes - Quick Reference Guide
## 🚨 Top 5 Critical Fixes (Do These First)
### 1. Fix useNotifications Memory Leak ⚠️ CRITICAL
**File**: `hooks/use-notifications.ts`
**Line**: 239-255
**Problem**: Cleanup function not properly placed, causing memory leaks
**Quick Fix**:
```typescript
useEffect(() => {
if (status !== 'authenticated' || !session?.user) return;
isMountedRef.current = true;
// Initial fetch
fetchNotificationCount(true);
fetchNotifications();
// Start polling with proper cleanup
const intervalId = setInterval(() => {
if (isMountedRef.current) {
debouncedFetchCount();
}
}, POLLING_INTERVAL);
// ✅ Proper cleanup
return () => {
isMountedRef.current = false;
clearInterval(intervalId);
};
}, [status, session?.user?.id]); // ✅ Only primitive dependencies
```
---
### 2. Fix Notification Badge Double Fetching ⚠️ CRITICAL
**File**: `components/notification-badge.tsx`
**Lines**: 65-70, 82-87, 92-99
**Problem**: Three different places trigger the same fetch simultaneously
**Quick Fix**:
```typescript
// Add at top of component
const fetchInProgressRef = useRef(false);
const lastFetchRef = useRef<number>(0);
const FETCH_COOLDOWN = 1000; // 1 second cooldown
const manualFetch = async () => {
const now = Date.now();
// Prevent duplicate fetches
if (fetchInProgressRef.current) {
console.log('[NOTIFICATION_BADGE] Fetch already in progress');
return;
}
// Cooldown check
if (now - lastFetchRef.current < FETCH_COOLDOWN) {
console.log('[NOTIFICATION_BADGE] Too soon since last fetch');
return;
}
fetchInProgressRef.current = true;
lastFetchRef.current = now;
try {
await fetchNotifications(1, 10);
} finally {
fetchInProgressRef.current = false;
}
};
// Remove duplicate useEffect hooks, keep only one:
useEffect(() => {
if (isOpen && status === 'authenticated') {
manualFetch();
}
}, [isOpen, status]); // Only this one
```
---
### 3. Fix Redis KEYS Performance Issue ⚠️ CRITICAL
**File**: `lib/services/notifications/notification-service.ts`
**Line**: 293
**Problem**: `redis.keys()` blocks Redis and is O(N)
**Quick Fix**:
```typescript
// BEFORE (Line 293)
const listKeys = await redis.keys(listKeysPattern);
if (listKeys.length > 0) {
await redis.del(...listKeys);
}
// AFTER (Use SCAN)
const listKeys: string[] = [];
let cursor = '0';
do {
const [nextCursor, keys] = await redis.scan(
cursor,
'MATCH',
listKeysPattern,
'COUNT',
100
);
cursor = nextCursor;
if (keys.length > 0) {
listKeys.push(...keys);
}
} while (cursor !== '0');
if (listKeys.length > 0) {
await redis.del(...listKeys);
}
```
---
### 4. Fix Widget Interval Cleanup ⚠️ HIGH
**Files**:
- `components/calendar.tsx` (line 70)
- `components/parole.tsx` (line 83)
- `components/calendar/calendar-widget.tsx` (line 110)
**Problem**: Intervals may not be cleaned up properly
**Quick Fix Pattern**:
```typescript
// BEFORE
useEffect(() => {
fetchEvents();
const intervalId = setInterval(fetchEvents, 300000);
return () => clearInterval(intervalId);
}, []); // ❌ Missing dependencies
// AFTER
useEffect(() => {
if (status !== 'authenticated') return;
const fetchEvents = async () => {
// ... fetch logic
};
fetchEvents(); // Initial fetch
const intervalId = setInterval(fetchEvents, 300000);
return () => {
clearInterval(intervalId);
};
}, [status]); // ✅ Proper dependencies
```
---
### 5. Fix useEffect Infinite Loop Risk ⚠️ HIGH
**File**: `hooks/use-notifications.ts`
**Line**: 255
**Problem**: Function dependencies cause infinite re-renders
**Quick Fix**:
```typescript
// Remove function dependencies, use refs for stable references
const fetchNotificationCountRef = useRef(fetchNotificationCount);
const fetchNotificationsRef = useRef(fetchNotifications);
useEffect(() => {
fetchNotificationCountRef.current = fetchNotificationCount;
fetchNotificationsRef.current = fetchNotifications;
});
useEffect(() => {
if (status !== 'authenticated' || !session?.user) return;
isMountedRef.current = true;
fetchNotificationCountRef.current(true);
fetchNotificationsRef.current();
const intervalId = setInterval(() => {
if (isMountedRef.current) {
fetchNotificationCountRef.current();
}
}, POLLING_INTERVAL);
return () => {
isMountedRef.current = false;
clearInterval(intervalId);
};
}, [status, session?.user?.id]); // ✅ Only primitive values
```
---
## 🔧 Additional Quick Wins
### 6. Add Request Deduplication Utility
**Create**: `lib/utils/request-deduplication.ts`
```typescript
const pendingRequests = new Map<string, Promise<any>>();
export function deduplicateRequest<T>(
key: string,
requestFn: () => Promise<T>
): Promise<T> {
if (pendingRequests.has(key)) {
return pendingRequests.get(key)!;
}
const promise = requestFn().finally(() => {
pendingRequests.delete(key);
});
pendingRequests.set(key, promise);
return promise;
}
```
**Usage**:
```typescript
const data = await deduplicateRequest(
`notifications-${userId}`,
() => fetch('/api/notifications').then(r => r.json())
);
```
---
### 7. Extract Magic Numbers to Constants
**Create**: `lib/constants/intervals.ts`
```typescript
export const INTERVALS = {
NOTIFICATION_POLLING: 60000, // 1 minute
CALENDAR_REFRESH: 300000, // 5 minutes
PAROLE_POLLING: 30000, // 30 seconds
MIN_FETCH_INTERVAL: 5000, // 5 seconds
FETCH_COOLDOWN: 1000, // 1 second
} as const;
```
---
### 8. Add Error Retry Logic
**Create**: `lib/utils/retry.ts`
```typescript
export async function retry<T>(
fn: () => Promise<T>,
maxAttempts = 3,
delay = 1000
): Promise<T> {
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
try {
return await fn();
} catch (error) {
if (attempt === maxAttempts) throw error;
await new Promise(resolve => setTimeout(resolve, delay * attempt));
}
}
throw new Error('Max retry attempts reached');
}
```
---
## 📋 Testing Checklist
After applying fixes, test:
- [ ] No memory leaks (check browser DevTools Memory tab)
- [ ] No duplicate API calls (check Network tab)
- [ ] Intervals are cleaned up (check console for errors)
- [ ] No infinite loops (check React DevTools Profiler)
- [ ] Redis performance (check response times)
- [ ] Error handling works (test with network offline)
---
## 🎯 Priority Order
1. **Fix 1** (Memory Leak) - Do immediately
2. **Fix 2** (Double Fetching) - Do immediately
3. **Fix 3** (Redis KEYS) - Do immediately
4. **Fix 4** (Widget Cleanup) - Do within 24 hours
5. **Fix 5** (Infinite Loop) - Do within 24 hours
6. **Quick Wins** - Do within 1 week
---
*Last Updated: Critical fixes quick reference*

View File

@ -1,224 +0,0 @@
# Debugging the 502 Error - Keycloak Callback
## 🔍 Current Situation
**URL**: `https://hub.slm-lab.net/api/auth/callback/keycloak?...`
**Observed logs**:
- ✅ Profile callback: OK
- ✅ JWT callback: OK (roles extracted from the access token)
- ❌ Session callback: **NO LOGS** (does not run, or fails silently)
- ❌ Nginx 502 error
## 🎯 Hypotheses
### Hypothesis 1: The session callback fails silently
The session callback could fail before reaching the logs, causing an unhandled exception.
### Hypothesis 2: Problem with token.email or token.name
If `token.email` or `token.name` are `undefined` while the code expects values, this could cause an error.
### Hypothesis 3: Timeout or memory issue
The callback could take too long or consume too much memory.
### Hypothesis 4: Problem with NEXTAUTH_URL or NEXTAUTH_SECRET
Missing or incorrect configuration.
## ✅ Fixes Applied
### 1. Detailed logs in the session callback
- Logs at the start and at the end
- Logs for each step
- Logs of the token values
### 2. Full try-catch
- Catches all errors
- Detailed error logging
- Full stack trace
### 3. Validation of required fields
- Check of `token.sub` (user ID)
- Handling of `undefined` values
### 4. NextAuth events
- `signIn` event to track authentication
- `error` event to capture NextAuth errors
- `signOut` event for tracking
## 🔍 Next Investigation Steps
### Step 1: Check the new logs
After restarting the server, you should see:
```
=== SESSION CALLBACK START ===
Token error: undefined
Has accessToken: true
Has refreshToken: true
Token role: [...]
Token sub: ...
...
=== SESSION CALLBACK END ===
```
**If you do NOT see these logs**:
- The session callback does not run at all
- An error occurs before the callback is reached
- The problem is inside NextAuth itself
**If you see an error**:
- The detailed logs will indicate exactly where it fails
### Step 2: Check the NextAuth events
You should see:
```
=== NEXTAUTH SIGNIN EVENT ===
User: ... ...
Account: keycloak
Profile: ...
```
**If you see `=== NEXTAUTH ERROR EVENT ===`**:
- The error will be logged with details
### Step 3: Check the environment variables
**Check in `.env` or `.env.local`**:
```bash
NEXTAUTH_URL=https://hub.slm-lab.net
NEXTAUTH_SECRET=... (must be set)
KEYCLOAK_ISSUER=https://connect.slm-lab.net/realms/cercle
KEYCLOAK_CLIENT_ID=...
KEYCLOAK_CLIENT_SECRET=...
```
**Commands to check**:
```bash
# Check that the variables are loaded
node -e "console.log(process.env.NEXTAUTH_URL)"
node -e "console.log(process.env.NEXTAUTH_SECRET ? 'SET' : 'MISSING')"
```
### Step 4: Check the Nginx logs
**If Nginx sits in front of Next.js**, check the Nginx logs:
```bash
# Nginx error logs
sudo tail -f /var/log/nginx/error.log
# Nginx access logs
sudo tail -f /var/log/nginx/access.log
```
**Look for**:
- Timeout errors
- Connection refused
- Upstream errors
### Step 5: Check the system logs
**Check whether Next.js is crashing**:
```bash
# System logs
journalctl -u nextjs -f
# Or, if using PM2
pm2 logs
# Or, if using systemd
systemctl status nextjs
```
## 🛠️ Immediate Actions
### 1. Restart the Next.js server
```bash
# Stop
pm2 stop neah
# Or
systemctl stop nextjs
# Restart
pm2 start neah
# Or
systemctl start nextjs
```
### 2. Test the login again
1. Go to `/signin`
2. Sign in with Keycloak
3. Watch the logs in the terminal
### 3. Share the full logs
**What to share**:
- All logs from the start of the login
- The logs up to the 502 error
- The Nginx logs (if available)
- The system logs (if available)
## 🔧 Possible Solutions
### Solution 1: Problem with token.email or token.name
**If the logs show**:
```
Token email: undefined
Token name: undefined
```
**Fix**: The JWT callback must extract the email and name from the profile or the access token.
### Solution 2: Problem with NEXTAUTH_URL
**If NEXTAUTH_URL is incorrect**:
- NextAuth cannot build the callback URLs
- Fix: verify that `NEXTAUTH_URL` matches the public URL
### Solution 3: Problem with NEXTAUTH_SECRET
**If NEXTAUTH_SECRET is missing**:
- NextAuth cannot sign the JWTs
- Fix: generate a secret and add it
### Solution 4: Timeout
**If the callback takes too long**:
- Increase the Nginx timeouts
- Optimize the callback code
## 📊 Debugging Checklist
- [ ] Next.js server restarted
- [ ] `=== SESSION CALLBACK START ===` logs visible
- [ ] `=== SESSION CALLBACK END ===` logs visible
- [ ] No errors in the logs
- [ ] Environment variables checked
- [ ] Nginx logs checked (if applicable)
- [ ] System logs checked (if applicable)
## 🎯 What We Are Looking For
**In the next logs, we are looking for**:
1. **If we see `=== SESSION CALLBACK START ===`**:
- ✅ The callback runs
- Look for the error in the subsequent logs
2. **If we do NOT see `=== SESSION CALLBACK START ===`**:
- ❌ The callback does not run
- The problem is inside NextAuth, before the callback
- Check the NextAuth events
3. **If we see `=== NEXTAUTH ERROR EVENT ===`**:
- ✅ NextAuth caught an error
- The error will be logged with details
---
**Document created on**: $(date)
**Status**: Waiting for the new logs after restart

View File

@ -1,145 +0,0 @@
# Deprecated Functions and Files
This document lists functions and files that have been deprecated and should not be used in new code.
## Deprecated Files
### 1. `lib/email-formatter.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `lib/utils/email-utils.ts` instead
- **Reason**: Consolidated email formatting to a single source of truth
### 2. `lib/mail-parser-wrapper.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use functions from `lib/utils/email-utils.ts` instead
- **Reason**: Consolidated email formatting and sanitization to a single source of truth
### 3. `lib/email-parser.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `lib/server/email-parser.ts` for parsing and `lib/utils/email-utils.ts` for sanitization
- **Reason**: Consolidated email parsing and formatting to dedicated files
### 4. `lib/compose-mime-decoder.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `decodeComposeContent` and `encodeComposeContent` functions from `lib/utils/email-utils.ts`
- **Reason**: Consolidated MIME handling into the centralized formatter
## Deprecated Functions
### 1. `formatEmailForReplyOrForward` in `lib/services/email-service.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `formatEmailForReplyOrForward` from `lib/utils/email-utils.ts`
- **Reason**: Consolidated email formatting to a single source of truth
### 2. `formatSubject` in `lib/services/email-service.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: None specific, handled by centralized formatter
- **Reason**: Internal function of the email formatter
### 3. `createQuoteHeader` in `lib/services/email-service.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: None specific, handled by centralized formatter
- **Reason**: Internal function of the email formatter
## Centralized Email Formatting
All email formatting is now handled by the centralized formatter in `lib/utils/email-utils.ts`. This file contains:
1. `formatForwardedEmail`: Format emails for forwarding
2. `formatReplyEmail`: Format emails for replying or replying to all
3. `formatEmailForReplyOrForward`: Compatibility function that maps to the above two
4. `sanitizeHtml`: Safely sanitize HTML content while preserving direction attributes
Use these functions for all email formatting needs.
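A hedged usage sketch of the centralized helpers; the parameter shapes are assumptions based on the function names, so check the real signatures in `lib/utils/email-utils.ts`:
```typescript
import { formatReplyEmail, sanitizeHtml } from "@/lib/utils/email-utils";

// Sketch: sanitize the original message, then build the quoted reply body.
export function buildReplyBody(email: {
  html?: string;
  text?: string;
  from: string;
  subject: string;
}) {
  const safeOriginal = sanitizeHtml(email.html ?? email.text ?? "");
  // Parameter shape is illustrative only.
  return formatReplyEmail({ ...email, html: safeOriginal });
}
```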
## Email Parsing and Processing Functions
### 1. `splitEmailHeadersAndBody` (REMOVED)
- **Location**: Removed
- **Reason**: Email parsing has been centralized in `lib/server/email-parser.ts` and the API endpoint.
- **Replacement**: Use the `parseEmail` function from `lib/server/email-parser.ts` which provides a comprehensive parsing solution.
### 2. `getReplyBody`
- **Location**: `app/courrier/page.tsx`
- **Reason**: Should use the `ReplyContent` component directly.
- **Replacement**: Use `<ReplyContent email={email} type={type} />` directly.
- **Status**: Currently marked with `@deprecated` comment, no direct usages found.
### 3. `generateEmailPreview`
- **Location**: `app/courrier/page.tsx`
- **Reason**: Should use the `EmailPreview` component directly.
- **Replacement**: Use `<EmailPreview email={email} />` directly.
- **Status**: Currently marked with `@deprecated` comment, no usages found.
### 4. `cleanHtml` (REMOVED)
- **Location**: Removed from `lib/server/email-parser.ts`
- **Reason**: HTML sanitization has been consolidated in `lib/utils/email-utils.ts`.
- **Replacement**: Use `sanitizeHtml` from `lib/utils/email-utils.ts`.
### 5. `processHtml` (REMOVED)
- **Location**: Removed from `app/api/parse-email/route.ts`
- **Reason**: HTML processing has been consolidated in `lib/utils/email-utils.ts`.
- **Replacement**: Use `sanitizeHtml` from `lib/utils/email-utils.ts`.
## Deprecated API Routes
### 1. `app/api/mail/[id]/route.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `app/api/courrier/[id]/route.ts` instead.
### 2. `app/api/mail/route.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `app/api/courrier/route.ts` instead.
### 3. `app/api/mail/send/route.ts` (REMOVED)
- **Status**: Removed
- **Replacement**: Use `app/api/courrier/send/route.ts` instead.
### 4. `DELETE /api/users/[userId]` (DEPRECATED)
- **Status**: Deprecated but maintained for backward compatibility
- **Replacement**: Use `DELETE /api/users?id=[userId]&email=[userEmail]` instead
- **Reason**: The new endpoint format supports deletion across all integrated systems (Keycloak, Leantime, and Dolibarr)
- **Notes**: The deprecated endpoint now forwards requests to the new endpoint but developers should update their code to use the new format directly
## Deprecated Components
### ComposeEmail (components/ComposeEmail.tsx) (REMOVED)
**Status:** Removed
**Replacement:** Use `components/email/ComposeEmail.tsx` instead
This component has been removed in favor of the more modular and better structured version in the email directory. The newer version has the following improvements:
- Better separation between user message and quoted content in replies/forwards
- Improved styling and visual hierarchy
- Support for RTL/LTR text direction toggling
- More modern UI using Card components instead of a modal
- Better state management for email composition
A compatibility layer has been added to the new component to ensure backward compatibility with existing code that uses the old component. This allows for a smooth transition without breaking changes.
## Migration Plan
### Phase 1: Deprecation (Completed)
- Mark all deprecated functions with `@deprecated` comments
- Add console warnings to deprecated functions
- Document alternatives
### Phase 2: Removal (Completed)
- Remove deprecated files: `lib/email-parser.ts` and `lib/mail-parser-wrapper.ts`
- Consolidate all email formatting in `lib/utils/email-utils.ts`
- All email parsing now in `lib/server/email-parser.ts`
- Update documentation to point to the centralized utilities
## Server-Client Code Separation
### Server-side imports in client components
- **Status**: Fixed in November 2023
- **Issue**: Server-only modules like ImapFlow were being imported directly in client components, causing build errors with messages like "Module not found: Can't resolve 'tls'"
- **Fix**:
1. Added 'use server' directive to server-only modules
2. Created client-safe interfaces in client components
3. Added server actions for email operations that need server capabilities
4. Refactored ComposeEmail component to avoid direct server imports
This architecture ensures a clean separation between server and client code, which is essential for Next.js applications, particularly with the App Router. It prevents Node.js-specific modules from being bundled into client-side JavaScript.
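As a rough illustration of fix 3 above, a server action for sending mail might look like the sketch below. The module path `@/lib/server/email-service` and the `sendEmail` helper are assumptions for illustration, not the project's actual names.
```typescript
"use server";

// Hypothetical server action: the SMTP/ImapFlow code stays on the server, so
// Node-only modules such as 'tls' never end up in the client bundle.
// The import path and the sendEmail signature are assumptions.
import { sendEmail } from "@/lib/server/email-service";

export async function sendEmailAction(formData: FormData) {
  return sendEmail({
    to: String(formData.get("to") ?? ""),
    subject: String(formData.get("subject") ?? ""),
    body: String(formData.get("body") ?? ""),
  });
}
```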

View File

@ -1,64 +0,0 @@
# Neah Desktop Application
This is the desktop version of Neah, built with Electron and Next.js.
## Development
To run the application in development mode:
```bash
npm run electron:dev
```
This will start the Next.js development server and launch the Electron application that connects to it.
## Building
To build installers for your current platform:
```bash
npm run electron:build
```
This will create installers in the `dist` directory. The build script handles:
- Building the Next.js application in static export mode
- Code signing (placeholder for macOS)
- Building installers for your current platform
## Running from Production Build
If you have already built the Next.js app and want to run the Electron app without building installers:
```bash
npm run electron:start
```
## Platform-specific Notes
### macOS
- The app is configured with a placeholder code signing certificate
- For distribution, you will need to replace this with a real certificate from Apple
### Windows
- Windows builds are configured to use NSIS for creating installers
### Linux
- Linux builds create AppImage and Debian packages
## Configuration
The Electron configuration is in:
- `electron/main.js` - The main Electron process
- `electron/preload.js` - Preload script exposing APIs to the renderer
- `package.json` - Build configuration in the `build` section
The window controls integration is in:
- `components/electron/WindowControls.tsx` (a minimal preload sketch follows below)
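A minimal sketch of how the preload script can expose such controls to the renderer is shown below; the IPC channel names are assumptions, and the actual `electron/preload.js` may differ.
```typescript
// electron/preload.ts (illustrative; the channel names are assumptions)
import { contextBridge, ipcRenderer } from "electron";

contextBridge.exposeInMainWorld("windowControls", {
  minimize: () => ipcRenderer.send("window:minimize"),
  maximize: () => ipcRenderer.send("window:maximize"),
  close: () => ipcRenderer.send("window:close"),
});
```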
## Known Issues
- API routes: Since this is a static export, any server-side API routes will not work in the Electron app. You'll need to modify API calls to use external services or implement them in Electron.
- Authentication: If using server-side authentication, you may need to implement a custom flow for Electron.

View File

@ -1,259 +0,0 @@
# Iframe Logout Auto-Login Issue Analysis
## Problem
When you log out from an iframe application, you are automatically logged back into the dashboard without being prompted for credentials.
## Flow Trace
### Scenario: User Logs Out from Iframe Application
#### Step 1: Iframe Application Logout
```
Location: Iframe application (e.g., /parole, /gite, etc.)
Action: User clicks logout in iframe
What happens:
- Iframe app may call Keycloak logout endpoint directly
- OR: Iframe app sends postMessage to parent: { type: 'KEYCLOAK_LOGOUT' }
- OR: Iframe app clears its own session cookies
```
#### Step 2A: If Iframe Sends PostMessage (Expected Flow)
```
Location: components/layout/layout-wrapper.tsx (line 26-106)
OR: app/components/responsive-iframe.tsx (line 110-153)
Action: Dashboard receives logout message
What happens:
1. Sets sessionStorage.setItem('just_logged_out', 'true')
2. Sets document.cookie = 'logout_in_progress=true; path=/; max-age=60'
3. Calls /api/auth/end-sso-session (Admin API)
4. Calls signOut() from NextAuth
5. Redirects to Keycloak logout endpoint
6. Keycloak redirects back to /signin?logout=true
```
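For reference, the parent-side handling described in Step 2A boils down to something like the sketch below (simplified: the real handlers also redirect to the Keycloak logout endpoint after `signOut()`, and production code should validate `event.origin`).
```typescript
"use client";

import { useEffect } from "react";
import { signOut } from "next-auth/react";

// Simplified listener for the iframe logout postMessage.
export function IframeLogoutListener() {
  useEffect(() => {
    const onMessage = async (event: MessageEvent) => {
      if (event.data?.type !== "KEYCLOAK_LOGOUT") return;

      // Mark the logout so the sign-in page does not auto-login.
      sessionStorage.setItem("just_logged_out", "true");
      document.cookie = "logout_in_progress=true; path=/; max-age=60";

      try {
        await fetch("/api/auth/end-sso-session", { method: "POST", credentials: "include" });
      } catch {
        // Best effort: continue the local sign-out even if the Admin API call fails.
      }
      await signOut({ callbackUrl: "/signin?logout=true" });
    };

    window.addEventListener("message", onMessage);
    return () => window.removeEventListener("message", onMessage);
  }, []);

  return null;
}
```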
#### Step 2B: If Iframe Calls Keycloak Logout Directly (Actual Flow - Problem)
```
Location: Iframe application
Action: Iframe calls Keycloak logout endpoint directly
What happens:
1. Iframe redirects to: ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
2. Keycloak clears session cookies
3. Keycloak may redirect iframe back to its own logout page
4. Dashboard doesn't know about this logout
5. Dashboard still has NextAuth session (valid for 30 days)
```
#### Step 3: Dashboard Detects Session Invalidation
```
Location: app/api/auth/options.ts (refreshAccessToken function)
When: NextAuth tries to refresh the access token
What happens:
1. Dashboard calls Keycloak token refresh endpoint
2. Keycloak returns: { error: 'invalid_grant', error_description: 'Session not active' }
3. refreshAccessToken detects this error (line 100-108)
4. Returns token with error: "SessionNotActive"
5. JWT callback clears tokens (line 248-256)
6. Session callback returns null (line 272-276)
7. NextAuth treats user as unauthenticated
8. Status becomes "unauthenticated"
```
#### Step 4: Sign-In Page Auto-Login (THE PROBLEM)
```
Location: app/signin/page.tsx (line 47-79)
When: User is redirected to /signin (or status becomes "unauthenticated")
What happens:
1. Component mounts
2. First useEffect (line 16-45) checks for logout flag
- If logout=true in URL, sets isLogoutRedirect.current = true
- Removes 'just_logged_out' from sessionStorage
3. Second useEffect (line 47-79) checks authentication status
- If status === "authenticated" → redirects to home ✅
- If status === "unauthenticated" → triggers auto-login ❌
THE PROBLEM:
- When iframe logs out directly (not via postMessage), dashboard doesn't set logout flags
- Status becomes "unauthenticated" (because Keycloak session was cleared)
- Sign-in page sees status === "unauthenticated"
- Auto-login logic triggers after 1 second (line 69)
- signIn("keycloak") is called
- Keycloak still has SSO session cookie (if it wasn't fully cleared)
- User is auto-authenticated without credentials ❌
```
## Root Cause Analysis
### Problem 1: Missing Logout Flags
**When iframe logs out directly (not via postMessage):**
- Dashboard doesn't know about the logout
- `just_logged_out` is NOT set in sessionStorage
- `logout_in_progress` cookie is NOT set
- Sign-in page doesn't know this is a logout scenario
**Result**: Sign-in page treats it as a normal "unauthenticated" state and triggers auto-login.
### Problem 2: Auto-Login Logic Timing
**Sign-in page auto-login logic** (`app/signin/page.tsx:66-78`):
```typescript
if (status === "unauthenticated") {
hasAttemptedLogin.current = true;
const timer = setTimeout(() => {
if (!isLogoutRedirect.current) {
signIn("keycloak", { callbackUrl: "/" });
}
}, 1000);
}
```
**The Issue**:
- `isLogoutRedirect.current` is set in the first useEffect (line 16-45)
- But it only checks for `logout=true` in URL or `just_logged_out` in sessionStorage
- If iframe logs out directly, neither of these is set
- After 1 second, auto-login triggers
- `isLogoutRedirect.current` is still `false` (because logout flags weren't set)
- `signIn("keycloak")` is called
- User is auto-authenticated
### Problem 3: SSO Session Cookie Persistence
**Even if logout flags are set correctly:**
- Keycloak SSO session cookie (`KEYCLOAK_SESSION`) may still exist
- When `signIn("keycloak")` is called, Keycloak checks for SSO session cookie
- If cookie exists, Keycloak auto-authenticates without credentials
- This happens even with `prompt=login` parameter (if SSO session is still valid)
## Why This Happens
### Flow 1: Iframe Logs Out via PostMessage (Works Correctly)
```
1. Iframe sends postMessage → Dashboard receives it
2. Dashboard sets logout flags ✅
3. Dashboard calls logout endpoints ✅
4. Redirects to /signin?logout=true ✅
5. Sign-in page sees logout=true ✅
6. Auto-login is prevented ✅
7. User must click "Se connecter" manually ✅
```
### Flow 2: Iframe Logs Out Directly (THE PROBLEM)
```
1. Iframe calls Keycloak logout directly
2. Keycloak clears session cookies
3. Dashboard doesn't know about logout ❌
4. NextAuth tries to refresh token
5. Keycloak returns "Session not active"
6. NextAuth marks user as unauthenticated
7. User is redirected to /signin (no logout=true) ❌
8. Sign-in page sees status="unauthenticated" ❌
9. Auto-login triggers after 1 second ❌
10. Keycloak still has SSO session cookie ❌
11. User is auto-authenticated ❌
```
## The Real Issue
**The sign-in page auto-login logic is too aggressive:**
1. It triggers auto-login for ANY "unauthenticated" state
2. It doesn't distinguish between:
- User never logged in (should auto-login) ✅
- User logged out (should NOT auto-login) ❌
- Session expired (should NOT auto-login) ❌
- Keycloak session invalidated (should NOT auto-login) ❌
3. The logout detection only works if:
- `logout=true` is in URL (from Keycloak redirect)
- `just_logged_out` is in sessionStorage (from dashboard logout)
- But NOT if iframe logs out directly
## Solution Requirements
To fix this issue, you need to:
1. **Detect Keycloak Session Invalidation**:
- When NextAuth detects "SessionNotActive" error
- Set a flag to prevent auto-login
- Mark this as a logout scenario, not a new login
2. **Improve Logout Detection**:
- Check for Keycloak session cookie existence
- If session was invalidated (not just expired), prevent auto-login
- Store logout reason in sessionStorage
3. **Modify Auto-Login Logic** (see the sketch after this list):
- Only auto-login if:
- User is truly unauthenticated (never logged in)
- AND no logout flags are set
- AND no session invalidation detected
- Don't auto-login if:
- Logout flags are set
- Session was invalidated
- User came from a logout flow
4. **Handle Iframe Direct Logout**:
- Detect when Keycloak session is invalidated
- Set logout flags automatically
- Prevent auto-login
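A minimal sketch of what requirement 3 could look like in `app/signin/page.tsx`, reusing the flags already discussed in this document (illustrative, not the final implementation):
```typescript
"use client";

import { useEffect } from "react";
import { useRouter, useSearchParams } from "next/navigation";
import { signIn, useSession } from "next-auth/react";

// Guarded auto-login: SSO auto-login only runs when no logout/invalidation flag is present.
export default function SignInPage() {
  const { status } = useSession();
  const searchParams = useSearchParams();
  const router = useRouter();

  useEffect(() => {
    if (status === "authenticated") {
      router.push("/");
      return;
    }
    if (status !== "unauthenticated") return;

    const cameFromLogout =
      searchParams.get("logout") === "true" ||
      sessionStorage.getItem("just_logged_out") === "true" ||
      sessionStorage.getItem("session_invalidated") === "true";

    if (cameFromLogout) {
      // Wait for an explicit click on "Se connecter" instead of auto-logging in.
      return;
    }

    const timer = setTimeout(() => signIn("keycloak", { callbackUrl: "/" }), 1000);
    return () => clearTimeout(timer);
  }, [status, searchParams, router]);

  return null; // the real page renders the sign-in UI here
}
```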
## Current Code Issues
### Issue 1: Auto-Login Logic (`app/signin/page.tsx:66-78`)
```typescript
if (status === "unauthenticated") {
// This triggers for ANY unauthenticated state
// Doesn't check if session was invalidated
signIn("keycloak", { callbackUrl: "/" });
}
```
### Issue 2: Logout Detection (`app/signin/page.tsx:16-45`)
```typescript
// Only checks for explicit logout flags
// Doesn't detect session invalidation
const logoutParam = searchParams.get('logout');
const fromLogout = sessionStorage.getItem('just_logged_out');
```
### Issue 3: Session Invalidation Detection (`app/api/auth/options.ts:248-256`)
```typescript
// Detects session invalidation
// But doesn't set logout flags
// Sign-in page doesn't know session was invalidated
if (refreshedToken.error === "SessionNotActive") {
return {
...refreshedToken,
accessToken: undefined,
// Should set a flag here to prevent auto-login
};
}
```
## Summary
**Why you're auto-logged in after iframe logout:**
1. Iframe logs out directly (not via postMessage)
2. Keycloak session is cleared
3. Dashboard detects session invalidation
4. User becomes "unauthenticated"
5. Sign-in page auto-login logic triggers (after 1 second)
6. Keycloak still has SSO session cookie
7. User is auto-authenticated without credentials
**The fix requires:**
- Detecting session invalidation and setting logout flags
- Preventing auto-login when session was invalidated
- Only auto-login for truly new users (never logged in)

View File

@ -1,89 +0,0 @@
# Iframe Logout Session Invalidation Fix
## Problem
When a user logs out from an application inside an iframe:
1. The iframe application calls Keycloak logout endpoint
2. Keycloak session is invalidated
3. NextAuth dashboard still has a valid JWT token
4. When NextAuth tries to refresh the token, Keycloak returns: `{ error: 'invalid_grant', error_description: 'Session not active' }`
5. This causes a `JWT_SESSION_ERROR` and the user sees errors but isn't automatically signed out
## Root Cause
The `refreshAccessToken` function was catching all errors generically and setting `error: "RefreshAccessTokenError"`. When the session callback received this error, it would throw, causing a JWT_SESSION_ERROR but not properly signing the user out.
## Solution
### 1. Detect Session Invalidation
In `refreshAccessToken`, we now specifically detect when Keycloak returns `invalid_grant` with "Session not active":
```typescript
if (refreshedTokens.error === 'invalid_grant' ||
refreshedTokens.error_description?.includes('Session not active') ||
refreshedTokens.error_description?.includes('Token is not active')) {
return {
...token,
error: "SessionNotActive",
};
}
```
### 2. Clear Tokens in JWT Callback
When we detect `SessionNotActive`, we clear the tokens in the JWT callback:
```typescript
if (refreshedToken.error === "SessionNotActive") {
return {
...refreshedToken,
accessToken: undefined,
refreshToken: undefined,
idToken: undefined,
};
}
```
### 3. Return Null in Session Callback
When tokens are missing or session is invalidated, the session callback returns `null`, which makes NextAuth treat the user as unauthenticated:
```typescript
if (token.error === "SessionNotActive" || !token.accessToken) {
return null as any; // NextAuth will treat user as unauthenticated
}
```
## Result
Now when a user logs out from an iframe application:
1. Keycloak session is invalidated
2. NextAuth detects the invalid session on next token refresh
3. Tokens are cleared
4. Session callback returns null
5. User is automatically treated as unauthenticated
6. NextAuth redirects to the sign-in page (via the AuthCheck component; a sketch follows below)
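For context, a guard like the AuthCheck component mentioned in step 6 typically follows the pattern sketched below; the project's actual component may differ.
```typescript
"use client";

import { useEffect } from "react";
import type { ReactNode } from "react";
import { useRouter } from "next/navigation";
import { useSession } from "next-auth/react";

// Once the session callback returns null, `status` becomes "unauthenticated"
// and the user is sent back to the sign-in page.
export function AuthCheck({ children }: { children: ReactNode }) {
  const { status } = useSession();
  const router = useRouter();

  useEffect(() => {
    if (status === "unauthenticated") {
      router.push("/signin");
    }
  }, [status, router]);

  if (status !== "authenticated") return null;
  return <>{children}</>;
}
```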
## Files Modified
- `app/api/auth/options.ts`:
- Enhanced `refreshAccessToken` to detect `invalid_grant` errors
- Clear tokens when session is invalidated
- Return null from session callback when session is invalid
## Testing
To test this fix:
1. Log in to the dashboard
2. Open an iframe application
3. Log out from the iframe application
4. Wait for NextAuth to try to refresh the token (or trigger a page refresh)
5. User should be automatically signed out and redirected to sign-in
---
**Date**: 2024
**Status**: ✅ Fixed
**Version**: 1.0

View File

@ -1,286 +0,0 @@
# Implementation Checklist: Unified Refresh System
## 📋 Step-by-Step Implementation Guide
### Phase 1: Foundation (Day 1) ⚡ CRITICAL
#### ✅ Step 1.1: Create Refresh Manager
- [ ] Create `lib/services/refresh-manager.ts`
- [ ] Test singleton pattern
- [ ] Test register/unregister
- [ ] Test start/stop
- [ ] Test deduplication logic
**Estimated Time**: 2-3 hours
---
#### ✅ Step 1.2: Create Request Deduplication
- [ ] Create `lib/utils/request-deduplication.ts`
- [ ] Test deduplication with same key
- [ ] Test TTL expiration
- [ ] Test cleanup
**Estimated Time**: 1 hour
---
#### ✅ Step 1.3: Create Constants
- [ ] Create `lib/constants/refresh-intervals.ts`
- [ ] Define all intervals
- [ ] Export helper function
**Estimated Time**: 30 minutes
---
#### ✅ Step 1.4: Create Unified Hook
- [ ] Create `hooks/use-unified-refresh.ts`
- [ ] Test registration on mount
- [ ] Test cleanup on unmount
- [ ] Test manual refresh
**Estimated Time**: 1-2 hours
---
### Phase 2: Fix Critical Issues (Day 1-2) 🔴 URGENT
#### ✅ Step 2.1: Fix Redis KEYS → SCAN
- [ ] Update `lib/services/notifications/notification-service.ts` line 293
- [ ] Replace `redis.keys()` with `redis.scan()` (see the sketch below)
- [ ] Test with large key sets
**Estimated Time**: 30 minutes
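A minimal sketch of the KEYS → SCAN replacement, assuming the service uses ioredis (adapt the call if a different Redis client is in use):
```typescript
import Redis from "ioredis";

// Iterate keys with SCAN instead of a single blocking KEYS command.
async function scanKeys(redis: Redis, pattern: string): Promise<string[]> {
  const keys: string[] = [];
  let cursor = "0";
  do {
    // SCAN returns [nextCursor, batchOfKeys]; COUNT is only a hint.
    const [nextCursor, batch] = await redis.scan(cursor, "MATCH", pattern, "COUNT", 100);
    keys.push(...batch);
    cursor = nextCursor;
  } while (cursor !== "0");
  return keys;
}
```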
---
#### ✅ Step 2.2: Fix Notification Hook Memory Leak
- [ ] Fix `hooks/use-notifications.ts` useEffect cleanup
- [ ] Remove function dependencies
- [ ] Test cleanup on unmount
**Estimated Time**: 1 hour
---
#### ✅ Step 2.3: Fix Notification Badge Double Fetch
- [ ] Update `components/notification-badge.tsx`
- [ ] Remove duplicate useEffect hooks
- [ ] Add request deduplication
- [ ] Test single fetch per action
**Estimated Time**: 1 hour
---
### Phase 3: Refactor Notifications (Day 2) 🟡 HIGH PRIORITY
#### ✅ Step 3.1: Refactor useNotifications Hook
- [ ] Integrate unified refresh
- [ ] Add request deduplication
- [ ] Remove manual polling
- [ ] Test all functionality
**Estimated Time**: 2-3 hours
---
#### ✅ Step 3.2: Update Notification Badge
- [ ] Remove manual fetch logic
- [ ] Use hook's refresh function
- [ ] Test UI interactions
**Estimated Time**: 1 hour
---
### Phase 4: Refactor Widgets (Day 3-4) 🟢 MEDIUM PRIORITY
#### ✅ Step 4.1: Refactor Calendar Widget
- [ ] Update `components/calendar.tsx`
- [ ] Use unified refresh hook
- [ ] Add request deduplication
- [ ] Test refresh functionality
**Estimated Time**: 1 hour
---
#### ✅ Step 4.2: Refactor Parole Widget
- [ ] Update `components/parole.tsx`
- [ ] Use unified refresh hook
- [ ] Remove manual interval
- [ ] Test chat updates
**Estimated Time**: 1 hour
---
#### ✅ Step 4.3: Refactor News Widget
- [ ] Update `components/news.tsx`
- [ ] Use unified refresh hook
- [ ] Add auto-refresh (was manual only)
- [ ] Test news updates
**Estimated Time**: 1 hour
---
#### ✅ Step 4.4: Refactor Email Widget
- [ ] Update `components/email.tsx`
- [ ] Use unified refresh hook
- [ ] Add auto-refresh (was manual only)
- [ ] Test email updates
**Estimated Time**: 1 hour
---
#### ✅ Step 4.5: Refactor Duties Widget
- [ ] Update `components/flow.tsx`
- [ ] Use unified refresh hook
- [ ] Add auto-refresh (was manual only)
- [ ] Test task updates
**Estimated Time**: 1 hour
---
#### ✅ Step 4.6: Refactor Navigation Bar Time
- [ ] Create `components/main-nav-time.tsx` (see the sketch below)
- [ ] Update `components/main-nav.tsx` to use new component
- [ ] Use unified refresh hook (1 second interval)
- [ ] Test time updates correctly
- [ ] Verify cleanup on unmount
**Estimated Time**: 30 minutes
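A rough sketch of what `components/main-nav-time.tsx` could look like on top of the unified refresh hook (illustrative; it assumes `'navbar-time'` has been added to the `RefreshableResource` union from the implementation plan):
```typescript
"use client";

import { useState } from "react";
import { useUnifiedRefresh } from "@/hooks/use-unified-refresh";
import { REFRESH_INTERVALS } from "@/lib/constants/refresh-intervals";

// The clock ticks through the unified refresh manager instead of a
// setInterval owned by main-nav.tsx, so cleanup is handled centrally.
export function MainNavTime() {
  const [now, setNow] = useState(() => new Date());

  useUnifiedRefresh({
    resource: "navbar-time", // assumes the RefreshableResource union was extended
    interval: REFRESH_INTERVALS.NAVBAR_TIME,
    onRefresh: async () => setNow(new Date()),
    priority: "high",
  });

  return <span>{now.toLocaleTimeString("fr-FR")}</span>;
}
```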
---
### Phase 5: Testing & Validation (Day 5) ✅ FINAL
#### ✅ Step 5.1: Memory Leak Testing
- [ ] Open DevTools Memory tab
- [ ] Monitor memory over 10 minutes
- [ ] Verify no memory leaks
- [ ] Check interval cleanup
**Estimated Time**: 1 hour
---
#### ✅ Step 5.2: API Call Reduction Testing
- [ ] Open DevTools Network tab
- [ ] Monitor API calls for 5 minutes
- [ ] Verify deduplication works
- [ ] Count total calls (should be ~60% less)
**Estimated Time**: 1 hour
---
#### ✅ Step 5.3: Performance Testing
- [ ] Test page load time
- [ ] Test widget refresh times
- [ ] Test with multiple tabs open
- [ ] Verify no performance degradation
**Estimated Time**: 1 hour
---
#### ✅ Step 5.4: User Experience Testing
- [ ] Test all widgets refresh correctly
- [ ] Test manual refresh buttons
- [ ] Test notification updates
- [ ] Verify smooth UX
**Estimated Time**: 1 hour
---
## 🎯 Daily Progress Tracking
### Day 1 Target:
- [x] Phase 1: Foundation (Steps 1.1-1.4)
- [x] Phase 2: Critical Fixes (Steps 2.1-2.3)
**Status**: ⏳ In Progress
---
### Day 2 Target:
- [ ] Phase 3: Notifications (Steps 3.1-3.2)
**Status**: ⏸️ Pending
---
### Day 3 Target:
- [ ] Phase 4: Widgets Part 1 (Steps 4.1-4.2)
**Status**: ⏸️ Pending
---
### Day 4 Target:
- [ ] Phase 4: Widgets Part 2 (Steps 4.3-4.5)
**Status**: ⏸️ Pending
---
### Day 5 Target:
- [ ] Phase 5: Testing (Steps 5.1-5.4)
**Status**: ⏸️ Pending
---
## 🐛 Known Issues to Watch For
1. **Race Conditions**: Monitor for duplicate requests
2. **Memory Leaks**: Watch for uncleaned intervals
3. **Performance**: Monitor API call frequency
4. **User Experience**: Ensure smooth refresh transitions
---
## 📊 Success Criteria
### Must Have:
- ✅ No memory leaks
- ✅ 60%+ reduction in API calls
- ✅ All widgets refresh correctly
- ✅ No duplicate requests
### Nice to Have:
- ✅ Configurable refresh intervals
- ✅ Pause/resume functionality
- ✅ Refresh status monitoring
- ✅ Error recovery
---
## 🔄 Rollback Plan
If issues arise:
1. **Keep old code**: Don't delete old implementations immediately
2. **Feature flag**: Use environment variable to toggle new/old system
3. **Gradual migration**: Migrate one widget at a time
4. **Monitor**: Watch for errors in production
---
## 📝 Notes
- All new code should be backward compatible
- Test each phase before moving to next
- Document any deviations from plan
- Update this checklist as you progress
---
*Last Updated: Implementation Checklist v1.0*

View File

@ -1,888 +0,0 @@
# Implementation Plan: Unified Refresh System
## 🎯 Goals
1. **Harmonize auto-refresh** across all widgets and notifications
2. **Reduce redundancy** and eliminate duplicate API calls
3. **Improve API efficiency** with request deduplication and caching
4. **Prevent memory leaks** with proper cleanup mechanisms
5. **Centralize refresh logic** for easier maintenance
---
## 📋 Current State Analysis
### Current Refresh Intervals:
- **Notifications**: 60 seconds (polling)
- **Calendar**: 5 minutes (300000ms)
- **Parole (Chat)**: 30 seconds (30000ms)
- **Navbar Time**: Static (not refreshing - needs fix)
- **News**: Manual only
- **Email**: Manual only
- **Duties (Tasks)**: Manual only
### Current Problems:
1. ❌ No coordination between widgets
2. ❌ Duplicate API calls from multiple components
3. ❌ Memory leaks from uncleaned intervals
4. ❌ No request deduplication
5. ❌ Inconsistent refresh patterns
---
## 🏗️ Architecture: Unified Refresh System
### Phase 1: Core Infrastructure
#### 1.1 Create Unified Refresh Manager
**File**: `lib/services/refresh-manager.ts`
```typescript
/**
* Unified Refresh Manager
* Centralizes all refresh logic, prevents duplicates, manages intervals
*/
export type RefreshableResource =
| 'notifications'
| 'notifications-count'
| 'calendar'
| 'news'
| 'email'
| 'parole'
| 'duties';
export interface RefreshConfig {
resource: RefreshableResource;
interval: number; // milliseconds
enabled: boolean;
priority: 'high' | 'medium' | 'low';
onRefresh: () => Promise<void>;
}
class RefreshManager {
private intervals: Map<RefreshableResource, NodeJS.Timeout> = new Map();
private configs: Map<RefreshableResource, RefreshConfig> = new Map();
private pendingRequests: Map<string, Promise<any>> = new Map();
private lastRefresh: Map<RefreshableResource, number> = new Map();
private isActive = false;
/**
* Register a refreshable resource
*/
register(config: RefreshConfig): void {
this.configs.set(config.resource, config);
if (config.enabled && this.isActive) {
this.startRefresh(config.resource);
}
}
/**
* Unregister a resource
*/
unregister(resource: RefreshableResource): void {
this.stopRefresh(resource);
this.configs.delete(resource);
this.lastRefresh.delete(resource);
}
/**
* Start all refresh intervals
*/
start(): void {
if (this.isActive) return;
this.isActive = true;
// Start all enabled resources
this.configs.forEach((config, resource) => {
if (config.enabled) {
this.startRefresh(resource);
}
});
}
/**
* Stop all refresh intervals
*/
stop(): void {
this.isActive = false;
// Clear all intervals
this.intervals.forEach((interval) => {
clearInterval(interval);
});
this.intervals.clear();
}
/**
* Start refresh for a specific resource
*/
private startRefresh(resource: RefreshableResource): void {
// Stop existing interval if any
this.stopRefresh(resource);
const config = this.configs.get(resource);
if (!config || !config.enabled) return;
// Initial refresh
this.executeRefresh(resource);
// Set up interval
const interval = setInterval(() => {
this.executeRefresh(resource);
}, config.interval);
this.intervals.set(resource, interval);
}
/**
* Stop refresh for a specific resource
*/
private stopRefresh(resource: RefreshableResource): void {
const interval = this.intervals.get(resource);
if (interval) {
clearInterval(interval);
this.intervals.delete(resource);
}
}
/**
* Execute refresh with deduplication
*/
private async executeRefresh(resource: RefreshableResource): Promise<void> {
const config = this.configs.get(resource);
if (!config) return;
const requestKey = `${resource}-${Date.now()}`;
const now = Date.now();
const lastRefreshTime = this.lastRefresh.get(resource) || 0;
// Prevent too frequent refreshes (minimum 1 second between same resource)
if (now - lastRefreshTime < 1000) {
console.log(`[RefreshManager] Skipping ${resource} - too soon`);
return;
}
// Check if there's already a pending request for this resource
const pendingKey = `${resource}-pending`;
if (this.pendingRequests.has(pendingKey)) {
console.log(`[RefreshManager] Deduplicating ${resource} request`);
return;
}
// Create and track the request
const refreshPromise = config.onRefresh()
.then(() => {
this.lastRefresh.set(resource, Date.now());
})
.catch((error) => {
console.error(`[RefreshManager] Error refreshing ${resource}:`, error);
})
.finally(() => {
this.pendingRequests.delete(pendingKey);
});
this.pendingRequests.set(pendingKey, refreshPromise);
try {
await refreshPromise;
} catch (error) {
// Error already logged above
}
}
/**
* Manually trigger refresh for a resource
*/
async refresh(resource: RefreshableResource, force = false): Promise<void> {
const config = this.configs.get(resource);
if (!config) {
throw new Error(`Resource ${resource} not registered`);
}
if (force) {
// Force refresh: clear last refresh time
this.lastRefresh.delete(resource);
}
await this.executeRefresh(resource);
}
/**
* Get refresh status
*/
getStatus(): {
active: boolean;
resources: Array<{
resource: RefreshableResource;
enabled: boolean;
lastRefresh: number | null;
interval: number;
}>;
} {
const resources = Array.from(this.configs.entries()).map(([resource, config]) => ({
resource,
enabled: config.enabled,
lastRefresh: this.lastRefresh.get(resource) || null,
interval: config.interval,
}));
return {
active: this.isActive,
resources,
};
}
}
// Singleton instance
export const refreshManager = new RefreshManager();
```
---
#### 1.2 Create Request Deduplication Utility
**File**: `lib/utils/request-deduplication.ts`
```typescript
/**
* Request Deduplication Utility
* Prevents duplicate API calls for the same resource
*/
interface PendingRequest<T> {
promise: Promise<T>;
timestamp: number;
}
class RequestDeduplicator {
private pendingRequests = new Map<string, PendingRequest<any>>();
private readonly DEFAULT_TTL = 5000; // 5 seconds
/**
* Execute a request with deduplication
*/
async execute<T>(
key: string,
requestFn: () => Promise<T>,
ttl: number = this.DEFAULT_TTL
): Promise<T> {
// Check if there's a pending request
const pending = this.pendingRequests.get(key);
if (pending) {
const age = Date.now() - pending.timestamp;
// If request is still fresh, reuse it
if (age < ttl) {
console.log(`[RequestDeduplicator] Reusing pending request: ${key}`);
return pending.promise;
} else {
// Request is stale, remove it
this.pendingRequests.delete(key);
}
}
// Create new request
const promise = requestFn()
.finally(() => {
// Clean up after request completes
this.pendingRequests.delete(key);
});
this.pendingRequests.set(key, {
promise,
timestamp: Date.now(),
});
return promise;
}
/**
* Cancel a pending request
*/
cancel(key: string): void {
this.pendingRequests.delete(key);
}
/**
* Clear all pending requests
*/
clear(): void {
this.pendingRequests.clear();
}
/**
* Get pending requests count
*/
getPendingCount(): number {
return this.pendingRequests.size;
}
}
export const requestDeduplicator = new RequestDeduplicator();
```
---
#### 1.3 Create Unified Refresh Hook
**File**: `hooks/use-unified-refresh.ts`
```typescript
/**
* Unified Refresh Hook
* Provides consistent refresh functionality for all widgets
*/
import { useEffect, useCallback, useRef } from 'react';
import { useSession } from 'next-auth/react';
import { refreshManager, RefreshableResource } from '@/lib/services/refresh-manager';
interface UseUnifiedRefreshOptions {
resource: RefreshableResource;
interval: number;
enabled?: boolean;
onRefresh: () => Promise<void>;
priority?: 'high' | 'medium' | 'low';
}
export function useUnifiedRefresh({
resource,
interval,
enabled = true,
onRefresh,
priority = 'medium',
}: UseUnifiedRefreshOptions) {
const { status } = useSession();
const onRefreshRef = useRef(onRefresh);
const isMountedRef = useRef(true);
// Update callback ref when it changes
useEffect(() => {
onRefreshRef.current = onRefresh;
}, [onRefresh]);
// Register/unregister with refresh manager
useEffect(() => {
if (status !== 'authenticated' || !enabled) {
return;
}
isMountedRef.current = true;
// Register with refresh manager
refreshManager.register({
resource,
interval,
enabled: true,
priority,
onRefresh: async () => {
if (isMountedRef.current) {
await onRefreshRef.current();
}
},
});
// Start refresh manager if not already started
refreshManager.start();
// Cleanup
return () => {
isMountedRef.current = false;
refreshManager.unregister(resource);
};
}, [resource, interval, enabled, priority, status]);
// Manual refresh function
const refresh = useCallback(
async (force = false) => {
if (status !== 'authenticated') return;
await refreshManager.refresh(resource, force);
},
[resource, status]
);
return {
refresh,
isActive: refreshManager.getStatus().active,
};
}
```
---
### Phase 2: Harmonized Refresh Intervals
#### 2.1 Define Standard Intervals
**File**: `lib/constants/refresh-intervals.ts`
```typescript
/**
* Standard Refresh Intervals
* All intervals in milliseconds
*/
export const REFRESH_INTERVALS = {
// High priority - real-time updates
NOTIFICATIONS: 30000, // 30 seconds (was 60s)
NOTIFICATIONS_COUNT: 30000, // 30 seconds (same as notifications)
PAROLE: 30000, // 30 seconds (unchanged)
NAVBAR_TIME: 1000, // 1 second (navigation bar time - real-time)
// Medium priority - frequent but not real-time
EMAIL: 60000, // 1 minute (was manual only)
DUTIES: 120000, // 2 minutes (was manual only)
// Low priority - less frequent updates
CALENDAR: 300000, // 5 minutes (unchanged)
NEWS: 600000, // 10 minutes (was manual only)
// Minimum interval between refreshes (prevents spam)
MIN_INTERVAL: 1000, // 1 second
} as const;
/**
* Get refresh interval for a resource
*/
export function getRefreshInterval(resource: string): number {
switch (resource) {
case 'notifications':
return REFRESH_INTERVALS.NOTIFICATIONS;
case 'notifications-count':
return REFRESH_INTERVALS.NOTIFICATIONS_COUNT;
case 'parole':
return REFRESH_INTERVALS.PAROLE;
case 'email':
return REFRESH_INTERVALS.EMAIL;
case 'duties':
return REFRESH_INTERVALS.DUTIES;
case 'calendar':
return REFRESH_INTERVALS.CALENDAR;
case 'news':
return REFRESH_INTERVALS.NEWS;
default:
return 60000; // Default: 1 minute
}
}
```
---
### Phase 3: Refactor Widgets
#### 3.1 Refactor Notification Hook
**File**: `hooks/use-notifications.ts` (Refactored)
```typescript
import { useState, useEffect, useCallback, useRef } from 'react';
import { useSession } from 'next-auth/react';
import { Notification, NotificationCount } from '@/lib/types/notification';
import { useUnifiedRefresh } from './use-unified-refresh';
import { REFRESH_INTERVALS } from '@/lib/constants/refresh-intervals';
import { requestDeduplicator } from '@/lib/utils/request-deduplication';
const defaultNotificationCount: NotificationCount = {
total: 0,
unread: 0,
sources: {},
};
export function useNotifications() {
const { data: session, status } = useSession();
const [notifications, setNotifications] = useState<Notification[]>([]);
const [notificationCount, setNotificationCount] = useState<NotificationCount>(defaultNotificationCount);
const [loading, setLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
const isMountedRef = useRef(true);
// Fetch notification count
const fetchNotificationCount = useCallback(async () => {
if (!session?.user || !isMountedRef.current) return;
try {
setError(null);
const data = await requestDeduplicator.execute(
`notifications-count-${session.user.id}`,
async () => {
const response = await fetch('/api/notifications/count', {
credentials: 'include',
});
if (!response.ok) {
throw new Error('Failed to fetch notification count');
}
return response.json();
}
);
if (isMountedRef.current) {
setNotificationCount(data);
}
} catch (err) {
console.error('Error fetching notification count:', err);
if (isMountedRef.current) {
setError('Failed to fetch notification count');
}
}
}, [session?.user]);
// Fetch notifications
const fetchNotifications = useCallback(async (page = 1, limit = 20) => {
if (!session?.user || !isMountedRef.current) return;
setLoading(true);
setError(null);
try {
const data = await requestDeduplicator.execute(
`notifications-${session.user.id}-${page}-${limit}`,
async () => {
const response = await fetch(`/api/notifications?page=${page}&limit=${limit}`, {
credentials: 'include',
});
if (!response.ok) {
throw new Error('Failed to fetch notifications');
}
return response.json();
}
);
if (isMountedRef.current) {
setNotifications(data.notifications);
}
} catch (err) {
console.error('Error fetching notifications:', err);
if (isMountedRef.current) {
setError('Failed to fetch notifications');
}
} finally {
if (isMountedRef.current) {
setLoading(false);
}
}
}, [session?.user]);
// Use unified refresh for notification count
useUnifiedRefresh({
resource: 'notifications-count',
interval: REFRESH_INTERVALS.NOTIFICATIONS_COUNT,
enabled: status === 'authenticated',
onRefresh: fetchNotificationCount,
priority: 'high',
});
// Initial fetch
useEffect(() => {
isMountedRef.current = true;
if (status === 'authenticated' && session?.user) {
fetchNotificationCount();
fetchNotifications();
}
return () => {
isMountedRef.current = false;
};
}, [status, session?.user, fetchNotificationCount, fetchNotifications]);
// Mark as read
const markAsRead = useCallback(async (notificationId: string) => {
if (!session?.user) return false;
try {
const response = await fetch(`/api/notifications/${notificationId}/read`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
credentials: 'include',
});
if (!response.ok) return false;
setNotifications(prev =>
prev.map(n => n.id === notificationId ? { ...n, isRead: true } : n)
);
await fetchNotificationCount();
return true;
} catch (err) {
console.error('Error marking notification as read:', err);
return false;
}
}, [session?.user, fetchNotificationCount]);
// Mark all as read
const markAllAsRead = useCallback(async () => {
if (!session?.user) return false;
try {
const response = await fetch('/api/notifications/read-all', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
credentials: 'include',
});
if (!response.ok) return false;
setNotifications(prev => prev.map(n => ({ ...n, isRead: true })));
await fetchNotificationCount();
return true;
} catch (err) {
console.error('Error marking all notifications as read:', err);
return false;
}
}, [session?.user, fetchNotificationCount]);
return {
notifications,
notificationCount,
loading,
error,
fetchNotifications,
fetchNotificationCount,
markAsRead,
markAllAsRead,
};
}
```
---
#### 3.2 Refactor Widget Components
**Example: Calendar Widget**
**File**: `components/calendar.tsx` (Refactored)
```typescript
"use client";
import { useEffect, useState } from "react";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { RefreshCw, Calendar as CalendarIcon } from "lucide-react";
import { useUnifiedRefresh } from '@/hooks/use-unified-refresh';
import { REFRESH_INTERVALS } from '@/lib/constants/refresh-intervals';
import { requestDeduplicator } from '@/lib/utils/request-deduplication';
import { useSession } from 'next-auth/react';
interface Event {
id: string;
title: string;
start: string;
end: string;
allDay: boolean;
calendar: string;
calendarColor: string;
}
export function Calendar() {
const { status } = useSession();
const [events, setEvents] = useState<Event[]>([]);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
const fetchEvents = async () => {
if (status !== 'authenticated') return;
setLoading(true);
setError(null);
try {
const calendarsData = await requestDeduplicator.execute(
'calendar-events',
async () => {
const response = await fetch('/api/calendars?refresh=true');
if (!response.ok) {
throw new Error('Failed to fetch events');
}
return response.json();
}
);
const now = new Date();
now.setHours(0, 0, 0, 0);
const allEvents = calendarsData.flatMap((calendar: any) =>
(calendar.events || []).map((event: any) => ({
id: event.id,
title: event.title,
start: event.start,
end: event.end,
allDay: event.isAllDay,
calendar: calendar.name,
calendarColor: calendar.color
}))
);
const upcomingEvents = allEvents
.filter((event: any) => new Date(event.start) >= now)
.sort((a: any, b: any) => new Date(a.start).getTime() - new Date(b.start).getTime())
.slice(0, 7);
setEvents(upcomingEvents);
} catch (err) {
console.error('Error fetching events:', err);
setError('Failed to load events');
} finally {
setLoading(false);
}
};
// Use unified refresh
const { refresh } = useUnifiedRefresh({
resource: 'calendar',
interval: REFRESH_INTERVALS.CALENDAR,
enabled: status === 'authenticated',
onRefresh: fetchEvents,
priority: 'low',
});
// Initial fetch
useEffect(() => {
if (status === 'authenticated') {
fetchEvents();
}
}, [status]);
// ... rest of component (formatDate, formatTime, render)
return (
<Card className="...">
<CardHeader>
<CardTitle>Agenda</CardTitle>
<Button onClick={() => refresh(true)}>
<RefreshCw className="..." />
</Button>
</CardHeader>
{/* ... */}
</Card>
);
}
```
---
### Phase 4: Implementation Steps
#### Step 1: Create Core Infrastructure (Day 1)
1. ✅ Create `lib/services/refresh-manager.ts`
2. ✅ Create `lib/utils/request-deduplication.ts`
3. ✅ Create `lib/constants/refresh-intervals.ts`
4. ✅ Create `hooks/use-unified-refresh.ts`
**Testing**: Unit tests for each module
---
#### Step 2: Fix Memory Leaks (Day 1-2)
1. ✅ Fix `useNotifications` hook cleanup
2. ✅ Fix notification badge double fetching
3. ✅ Fix widget interval cleanup
4. ✅ Fix Redis KEYS → SCAN
**Testing**: Memory leak detection in DevTools
---
#### Step 3: Refactor Notifications (Day 2)
1. ✅ Refactor `hooks/use-notifications.ts`
2. ✅ Update `components/notification-badge.tsx`
3. ✅ Remove duplicate fetch logic
**Testing**: Verify no duplicate API calls
---
#### Step 4: Refactor Widgets (Day 3-4)
1. ✅ Refactor `components/calendar.tsx`
2. ✅ Refactor `components/parole.tsx`
3. ✅ Refactor `components/news.tsx`
4. ✅ Refactor `components/email.tsx`
5. ✅ Refactor `components/flow.tsx` (Duties)
6. ✅ Refactor `components/main-nav.tsx` (Time display)
**Testing**: Verify all widgets refresh correctly
---
#### Step 5: Testing & Optimization (Day 5)
1. ✅ Performance testing
2. ✅ Memory leak verification
3. ✅ API call reduction verification
4. ✅ User experience testing
---
## 📊 Expected Improvements
### Before:
- **API Calls**: ~120-150 calls/minute (with duplicates)
- **Memory Leaks**: Yes (intervals not cleaned up)
- **Refresh Coordination**: None
- **Request Deduplication**: None
### After:
- **API Calls**: ~40-50 calls/minute (60-70% reduction)
- **Memory Leaks**: None (proper cleanup)
- **Refresh Coordination**: Centralized
- **Request Deduplication**: Full coverage
---
## 🎯 Success Metrics
1. **API Call Reduction**: 60%+ reduction in duplicate calls
2. **Memory Usage**: No memory leaks detected
3. **Performance**: Faster page loads, smoother UX
4. **Maintainability**: Single source of truth for refresh logic
---
## 🚀 Quick Start Implementation
### Priority Order:
1. **Critical** (Do First):
- Fix memory leaks
- Create refresh manager
- Create request deduplication
2. **High** (Do Second):
- Refactor notifications
- Refactor high-frequency widgets (parole, notifications)
3. **Medium** (Do Third):
- Refactor medium-frequency widgets (email, duties)
4. **Low** (Do Last):
- Refactor low-frequency widgets (calendar, news)
---
## 📝 Notes
- All intervals are configurable via constants
- Refresh manager can be paused/resumed globally
- Request deduplication prevents duplicate calls within 5 seconds
- All cleanup is handled automatically
- Compatible with existing code (gradual migration)
---
*Implementation Plan v1.0*

View File

@ -1,142 +0,0 @@
# Implementation Summary - Logging Improvements
**Date**: 2026-01-01
**Status**: ✅ Completed
---
## 📋 What Was Done
### 1. Impact Analysis Completed ✅
**File Created**: `SESSION_CALLBACK_LOGGING_IMPACT_ANALYSIS.md`
**Key Findings**:
- ✅ **No functional impact** on Keycloak, MinIO, or external services
- ✅ **Logging was added for debugging**, not functionality
- ✅ **Error logging will be preserved** (critical for troubleshooting)
- ✅ **Conditional logging recommended** (DEBUG_SESSION flag)
**Recommendation**:
- Proceed with conditional logging using `DEBUG_SESSION` environment variable
- Keep error logging always enabled
- Make success logging conditional
---
### 2. Mark-as-Read Logging Added ✅
**Files Modified**:
1. `app/api/notifications/[id]/read/route.ts`
2. `app/api/notifications/read-all/route.ts`
**Logging Added**:
- ✅ Entry logging (when endpoint is called)
- ✅ Authentication status logging
- ✅ User ID and notification ID logging
- ✅ Success/failure logging with duration
- ✅ Error logging with stack traces
- ✅ Timestamp logging
**Log Format**:
```
[NOTIFICATION_API] Mark as read endpoint called
[NOTIFICATION_API] Mark as read - Processing { userId, notificationId, timestamp }
[NOTIFICATION_API] Mark as read - Success { userId, notificationId, duration }
```
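As a reference, the pattern behind these log lines looks roughly like the sketch below; the real handlers also perform authentication and the actual mark-as-read call.
```typescript
import { NextRequest, NextResponse } from "next/server";

// Illustrative logging pattern for app/api/notifications/[id]/read/route.ts.
export async function POST(
  request: NextRequest,
  { params }: { params: { id: string } }
) {
  const start = Date.now();
  console.log("[NOTIFICATION_API] Mark as read endpoint called");

  try {
    // ... auth check and notification-service call happen here ...
    console.log("[NOTIFICATION_API] Mark as read - Success", {
      notificationId: params.id,
      duration: `${Date.now() - start}ms`,
    });
    return NextResponse.json({ success: true });
  } catch (error) {
    console.error("[NOTIFICATION_API] Mark as read - Error", {
      error: error instanceof Error ? error.message : String(error),
      duration: `${Date.now() - start}ms`,
    });
    return NextResponse.json({ success: false }, { status: 500 });
  }
}
```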
---
## 🔍 What to Look For in Logs
### When Mark-as-Read is Called
**Expected Logs**:
```
[NOTIFICATION_API] Mark as read endpoint called
[NOTIFICATION_API] Mark as read - Processing { userId: "...", notificationId: "...", timestamp: "..." }
[NOTIFICATION_API] Mark as read - Success { userId: "...", notificationId: "...", duration: "Xms" }
```
**If Authentication Fails**:
```
[NOTIFICATION_API] Mark as read - Authentication failed
```
**If Operation Fails**:
```
[NOTIFICATION_API] Mark as read - Failed { userId: "...", notificationId: "...", duration: "Xms" }
```
**If Error Occurs**:
```
[NOTIFICATION_API] Mark as read - Error { error: "...", stack: "...", duration: "Xms" }
```
---
## 📊 Next Steps
### Immediate (Ready to Test)
1. **Test Mark-as-Read Functionality**
- Mark a single notification as read
- Mark all notifications as read
- Check logs for the new logging statements
- Verify notification count updates correctly
2. **Monitor Logs**
- Watch for `[NOTIFICATION_API]` log entries
- Verify timing information
- Check for any errors
### Future (When Ready)
3. **Implement Conditional Session Callback Logging**
- Add `DEBUG_SESSION` environment variable support
- Update `app/api/auth/options.ts`
- Test in development and production
- Document in README
---
## 🎯 Testing Checklist
- [ ] Mark single notification as read → Check logs
- [ ] Mark all notifications as read → Check logs
- [ ] Verify notification count updates
- [ ] Check for any errors in logs
- [ ] Verify performance (duration logging)
- [ ] Test with invalid notification ID
- [ ] Test without authentication
---
## 📝 Files Changed
1. ✅ `app/api/notifications/[id]/read/route.ts` - Added comprehensive logging
2. ✅ `app/api/notifications/read-all/route.ts` - Added comprehensive logging
3. ✅ `SESSION_CALLBACK_LOGGING_IMPACT_ANALYSIS.md` - Created impact analysis
4. ✅ `IMPLEMENTATION_SUMMARY.md` - This file
---
## 🔧 Environment Variables
**No new environment variables required** for mark-as-read logging.
**Future**: `DEBUG_SESSION` will be needed for conditional session callback logging (not implemented yet).
---
## ✅ Status
**Mark-as-Read Logging**: ✅ **COMPLETE**
**Session Callback Impact Analysis**: ✅ **COMPLETE**
**Session Callback Conditional Logging**: ⏳ **PENDING** (awaiting approval)
---
**Generated**: 2026-01-01
**Ready for Testing**: ✅ Yes

View File

@ -1,684 +0,0 @@
# Login Flow Improvements - Recommendations
## 📋 Overview
This document proposes concrete improvements to fix and optimize the login/logout flow of the Next.js dashboard with NextAuth and Keycloak.
---
## 🎯 Identified Problems and Solutions
### Problem 1: `prompt=login` always active - Prevents natural SSO
**Current situation**:
```typescript
// app/api/auth/options.ts line 154
authorization: {
params: {
scope: "openid profile email roles",
prompt: "login" // ⚠️ TOUJOURS actif
}
}
```
**Impact**:
- ❌ The user must **always** enter their credentials, even on a first visit
- ❌ Prevents the natural SSO experience
- ❌ Poor UX for legitimate users
**Recommended solution**: Handle `prompt=login` conditionally
```typescript
// app/api/auth/options.ts
authorization: {
params: {
scope: "openid profile email roles",
// Do not force prompt=login by default
// prompt: "login" // ❌ TO BE REMOVED
}
}
```
**AND**: Add `prompt=login` only after an explicit logout
```typescript
// In signIn() after logout
// Note: with NextAuth v4, extra authorization params are passed as the third argument of signIn()
signIn(
"keycloak",
{ callbackUrl: "/" },
// Add prompt=login only if a logout just happened
shouldForceLogin ? { prompt: "login" } : {}
);
```
---
### Problem 2: Invalid-session detection too complex and fragile
**Current situation**:
- Complex logic in `app/signin/page.tsx` (lines 17-67)
- Multiple checks across cookies, sessionStorage, and URL params
- Possible race conditions
- Auto-login can trigger incorrectly
**Recommended solution**: Simplify with a server-side flag
#### Option A: Use a server cookie to mark the logout
```typescript
// app/api/auth/end-sso-session/route.ts
export async function POST(request: NextRequest) {
// ... existing code ...
// After a successful logout, set a flag cookie
const response = NextResponse.json({ success: true });
response.cookies.set('force_login_prompt', 'true', {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
path: '/',
maxAge: 300 // 5 minutes
});
return response;
}
```
#### Option B: Use a state parameter in the Keycloak URL
```typescript
// In signout-handler.tsx, add a state parameter
const keycloakLogoutUrl = new URL(
`${keycloakIssuer}/protocol/openid-connect/logout`
);
keycloakLogoutUrl.searchParams.append('state', 'force_login');
// Keycloak will send this state back in the redirect
```
**Simplification of signin/page.tsx**:
```typescript
// Check the server cookie instead of complex logic
const forceLoginCookie = document.cookie
.split(';')
.find(c => c.trim().startsWith('force_login_prompt='));
if (forceLoginCookie) {
// Delete the cookie
document.cookie = 'force_login_prompt=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;';
// Force prompt=login
signIn("keycloak", {
callbackUrl: "/",
// Add prompt via custom params if possible
});
}
```
---
### Problem 3: Keycloak cookies cannot be deleted (different domain)
**Current situation**:
- `clearKeycloakCookies()` fails when Keycloak runs on a different domain
- Keycloak cookies persist after logout
- The SSO session can persist
**Recommended solutions**:
#### Solution 1: Call the Keycloak logout endpoint with all parameters
```typescript
// Improve the logout URL in signout-handler.tsx
const keycloakLogoutUrl = new URL(
`${keycloakIssuer}/protocol/openid-connect/logout`
);
// Essential parameters
keycloakLogoutUrl.searchParams.append('post_logout_redirect_uri',
window.location.origin + '/signin?logout=true');
keycloakLogoutUrl.searchParams.append('id_token_hint', idToken);
// ✅ ADD these parameters to force SSO session removal
keycloakLogoutUrl.searchParams.append('kc_action', 'LOGOUT'); // Already present
keycloakLogoutUrl.searchParams.append('logout_hint', 'true'); // New
// If possible, use the client_id to force a client logout
if (process.env.NEXT_PUBLIC_KEYCLOAK_CLIENT_ID) {
keycloakLogoutUrl.searchParams.append('client_id',
process.env.NEXT_PUBLIC_KEYCLOAK_CLIENT_ID);
}
```
#### Solution 2: Use the Admin API to end ALL sessions
**Improve** `app/api/auth/end-sso-session/route.ts`:
```typescript
// Instead of logout({ id: userId }), use logoutAllSessions
try {
// Option 1: Log out all of the user's sessions
await adminClient.users.logout({ id: userId });
// Option 2: If available, use a more aggressive method
// Note: check the Keycloak Admin Client version
// Some versions support logoutAllSessions
// Option 3: Invalidate the refresh tokens
const userSessions = await adminClient.users.listSessions({ id: userId });
for (const session of userSessions) {
await adminClient.users.logoutSession({
id: userId,
sessionId: session.id
});
}
} catch (error) {
// ... error handling
}
```
#### Solution 3: Configure Keycloak for SameSite=None (if cross-domain)
**Keycloak configuration** (to be done on the Keycloak side):
```
Cookie SameSite: None
Cookie Secure: true
Cookie Domain: .example.com (shared parent domain)
```
**Then** improve `clearKeycloakCookies()`:
```typescript
// lib/session.ts
export function clearKeycloakCookies() {
const keycloakIssuer = process.env.NEXT_PUBLIC_KEYCLOAK_ISSUER;
if (!keycloakIssuer) return;
try {
const keycloakUrl = new URL(keycloakIssuer);
const keycloakDomain = keycloakUrl.hostname;
// Extract the parent domain if possible
const domainParts = keycloakDomain.split('.');
const parentDomain = domainParts.length > 2
? '.' + domainParts.slice(-2).join('.')
: keycloakDomain;
const keycloakCookieNames = [
'KEYCLOAK_SESSION',
'KEYCLOAK_SESSION_LEGACY',
'KEYCLOAK_IDENTITY',
'KEYCLOAK_IDENTITY_LEGACY',
'AUTH_SESSION_ID',
'KC_RESTART',
'KC_RESTART_LEGACY'
];
// Try with the parent domain (for SameSite=None)
keycloakCookieNames.forEach(cookieName => {
// With the parent domain
document.cookie = `${cookieName}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/; domain=${parentDomain}; SameSite=None; Secure;`;
// Without a domain (same-origin)
document.cookie = `${cookieName}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/; SameSite=None; Secure;`;
// With a specific path
document.cookie = `${cookieName}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/realms/; domain=${parentDomain}; SameSite=None; Secure;`;
});
} catch (error) {
console.error('Error clearing Keycloak cookies:', error);
}
}
```
---
### Problem 4: Race condition between logout and auto-login
**Current situation**:
- Auto-login fires after a 1-second delay
- It can trigger in the middle of the logout flow
- The `logout_in_progress` and `session_invalidated` flags can be lost
**Recommended solution**: Use a more robust mechanism
#### Option A: Use an HttpOnly cookie as the flag
```typescript
// Create an API route to mark the logout
// app/api/auth/mark-logout/route.ts
export async function POST() {
const response = NextResponse.json({ success: true });
response.cookies.set('logout_completed', 'true', {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
path: '/',
maxAge: 300 // 5 minutes
});
return response;
}
// In signout-handler.tsx
await fetch('/api/auth/mark-logout', { method: 'POST' });
// In signin/page.tsx
const logoutCompleted = document.cookie
.split(';')
.some(c => c.trim().startsWith('logout_completed='));
if (logoutCompleted) {
// Delete the cookie
document.cookie = 'logout_completed=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;';
// Do not auto-login
return;
}
```
#### Option B: Use a state in the Keycloak callback URL
```typescript
// During logout, add a state parameter
const logoutState = btoa(JSON.stringify({
logout: true,
timestamp: Date.now()
}));
// Keycloak will send this state back in the redirect
// In signin/page.tsx, check the state
const urlParams = new URLSearchParams(window.location.search);
const state = urlParams.get('state');
if (state) {
try {
const stateData = JSON.parse(atob(state));
if (stateData.logout) {
// Do not auto-login
return;
}
} catch (e) {
// Invalid state, ignore it
}
}
```
---
### Problem 5: NextAuth cookie configuration not explicit
**Current situation**:
- No explicit configuration of the NextAuth cookies
- The defaults are used
- No control over SameSite, Secure, etc.
**Recommended solution**: Configure the cookies explicitly
```typescript
// app/api/auth/options.ts
export const authOptions: NextAuthOptions = {
// ... providers ...
// ✅ ADD explicit cookie configuration
cookies: {
sessionToken: {
name: `next-auth.session-token`,
options: {
httpOnly: true,
sameSite: 'lax',
path: '/',
secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
// Explicit domain if needed
// domain: process.env.COOKIE_DOMAIN,
},
},
callbackUrl: {
name: `next-auth.callback-url`,
options: {
httpOnly: true,
sameSite: 'lax',
path: '/',
secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
},
},
csrfToken: {
name: `next-auth.csrf-token`,
options: {
httpOnly: true,
sameSite: 'lax',
path: '/',
secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
},
},
state: {
name: `next-auth.state`,
options: {
httpOnly: true,
sameSite: 'lax',
path: '/',
secure: process.env.NEXTAUTH_URL?.startsWith('https://') ?? false,
},
},
},
// ... rest of the config ...
};
```
**Benefits**:
- Full control over the cookies
- SameSite can be adjusted for cross-domain setups if needed
- Better security
---
### Problem 6: Invalid-session detection happens client-side only
**Current situation**:
- Invalid sessions are detected on the client only
- Cookies are checked via `document.cookie`
- Can be bypassed or misinterpreted
**Recommended solution**: Server-side + client-side detection
#### Create an API route to check the session state
```typescript
// app/api/auth/session-status/route.ts
import { NextRequest, NextResponse } from 'next/server';
import { getServerSession } from 'next-auth/next';
import { authOptions } from '../options';
export async function GET(request: NextRequest) {
const session = await getServerSession(authOptions);
// Check whether a session cookie exists but the session is invalid
const hasSessionCookie = request.cookies.has('next-auth.session-token') ||
request.cookies.has('__Secure-next-auth.session-token') ||
request.cookies.has('__Host-next-auth.session-token');
return NextResponse.json({
hasSession: !!session,
hasSessionCookie,
isInvalid: hasSessionCookie && !session, // Cookie exists but the session is invalid
shouldForceLogin: request.cookies.get('force_login_prompt')?.value === 'true',
});
}
```
#### Use this route in signin/page.tsx
```typescript
// app/signin/page.tsx
useEffect(() => {
const checkSessionStatus = async () => {
const response = await fetch('/api/auth/session-status');
const status = await response.json();
if (status.isInvalid) {
// Session invalidated, do not auto-login
sessionStorage.setItem('session_invalidated', 'true');
return;
}
if (status.shouldForceLogin) {
// Force prompt=login
signIn("keycloak", {
callbackUrl: "/",
// prompt: "login" via custom params if possible
});
return;
}
// New user, auto-login is fine
if (!status.hasSession && !status.hasSessionCookie) {
signIn("keycloak", { callbackUrl: "/" });
}
};
checkSessionStatus();
}, []);
```
---
### Problem 7: Insufficient token-refresh error handling
**Current situation**:
- `refreshAccessToken()` detects `SessionNotActive`
- It returns `error: "SessionNotActive"`
- But the handling can be improved
**Recommended solution**: Improve the error handling
```typescript
// app/api/auth/options.ts
async function refreshAccessToken(token: ExtendedJWT) {
try {
const response = await fetch(`${process.env.KEYCLOAK_ISSUER}/protocol/openid-connect/token`, {
headers: { "Content-Type": "application/x-www-form-urlencoded" },
body: new URLSearchParams({
client_id: process.env.KEYCLOAK_CLIENT_ID!,
client_secret: process.env.KEYCLOAK_CLIENT_SECRET!,
grant_type: "refresh_token",
refresh_token: token.refreshToken || '',
}),
method: "POST",
});
const refreshedTokens = await response.json();
if (!response.ok) {
// ✅ IMPROVEMENT: distinguish the different error types
const errorType = refreshedTokens.error;
const errorDescription = refreshedTokens.error_description || '';
// Refresh token expired (prolonged inactivity). Checked first, otherwise the
// generic invalid_grant branch below would swallow this case.
if (errorType === 'invalid_grant' &&
errorDescription.includes('Refresh token expired')) {
console.log("Refresh token expired, user needs to re-authenticate");
return {
...token,
error: "RefreshTokenExpired",
accessToken: undefined,
refreshToken: undefined,
idToken: undefined,
};
}
// Invalid session (logout from an iframe or from Keycloak)
if (errorType === 'invalid_grant' ||
errorDescription.includes('Session not active') ||
errorDescription.includes('Token is not active') ||
errorDescription.includes('Session expired')) {
console.log("Keycloak session invalidated, marking for removal");
return {
...token,
error: "SessionNotActive",
// ✅ Drop all tokens
accessToken: undefined,
refreshToken: undefined,
idToken: undefined,
};
}
// Any other error
throw refreshedTokens;
}
return {
...token,
accessToken: refreshedTokens.access_token,
refreshToken: refreshedTokens.refresh_token ?? token.refreshToken,
idToken: token.idToken, // Keycloak does not return a new ID token on refresh
accessTokenExpires: Date.now() + refreshedTokens.expires_in * 1000,
error: undefined, // ✅ Clear any previous errors
};
} catch (error: any) {
console.error("Error refreshing access token:", error);
// ✅ IMPROVEMENT: more robust error handling
if (error?.error === 'invalid_grant' ||
error?.error_description?.includes('Session not active') ||
error?.error_description?.includes('Token is not active')) {
return {
...token,
error: "SessionNotActive",
accessToken: undefined,
refreshToken: undefined,
idToken: undefined,
};
}
return {
...token,
error: "RefreshAccessTokenError",
accessToken: undefined,
refreshToken: undefined,
idToken: undefined,
};
}
}
```
---
## 🚀 Recommended Implementation Plan
### Phase 1: Critical Fixes (High Priority)
1. **Remove the default `prompt=login`**
- File: `app/api/auth/options.ts`
- Impact: improves UX for legitimate users
2. **Improve token-refresh error handling**
- File: `app/api/auth/options.ts`
- Impact: better detection of invalid sessions
3. **Configure the NextAuth cookies explicitly**
- File: `app/api/auth/options.ts`
- Impact: better control and security
### Phase 2: Logout Improvements (Medium Priority)
4. **Create an API route to mark logout**
- New file: `app/api/auth/mark-logout/route.ts`
- Modify: `components/auth/signout-handler.tsx`
- Modify: `app/signin/page.tsx`
- Impact: eliminates the race conditions
5. **Improve the Keycloak logout URL**
- Modify: `components/auth/signout-handler.tsx`
- Modify: `components/main-nav.tsx`
- Modify: `components/layout/layout-wrapper.tsx`
- Impact: better removal of SSO sessions
6. **Improve `clearKeycloakCookies()`**
- Modify: `lib/session.ts`
- Impact: better attempt at clearing cross-domain cookies
### Phase 3: Optimizations (Low Priority)
7. **Create an API route to check session status**
- New file: `app/api/auth/session-status/route.ts`
- Modify: `app/signin/page.tsx`
- Impact: more reliable detection
8. **Simplify the signin/page.tsx logic**
- Modify: `app/signin/page.tsx`
- Impact: more maintainable code
---
## 📝 Implementation Checklist
### Step 1: NextAuth Configuration
- [ ] Remove the default `prompt: "login"` in `options.ts`
- [ ] Add an explicit cookie configuration
- [ ] Improve the `refreshAccessToken()` error handling
### Step 2: Logout Improvements
- [ ] Create the `/api/auth/mark-logout` route
- [ ] Modify `signout-handler.tsx` to use the route
- [ ] Improve the Keycloak logout URL with all parameters (see the sketch below)
- [ ] Improve `clearKeycloakCookies()` for cross-domain setups
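A minimal sketch of what "all parameters" can mean for that logout URL, based on the OIDC RP-initiated logout parameters already used elsewhere in this document (`id_token_hint`, `post_logout_redirect_uri`); the helper name and `NEXT_PUBLIC_KEYCLOAK_CLIENT_ID` are assumptions, not existing code:
```typescript
// Hypothetical helper: builds the Keycloak RP-initiated logout URL.
// client_id is optional in the OIDC spec but helps Keycloak validate
// post_logout_redirect_uri when no id_token_hint is available.
function buildKeycloakLogoutUrl(idToken: string): string {
  const issuer = process.env.NEXT_PUBLIC_KEYCLOAK_ISSUER; // e.g. https://keycloak.example.com/realms/neah
  const params = new URLSearchParams({
    id_token_hint: idToken,
    post_logout_redirect_uri: `${window.location.origin}/signin?logout=true`,
    client_id: process.env.NEXT_PUBLIC_KEYCLOAK_CLIENT_ID ?? '',
  });
  return `${issuer}/protocol/openid-connect/logout?${params.toString()}`;
}
```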
### Step 3: Signin Improvements
- [ ] Create the `/api/auth/session-status` route
- [ ] Simplify the logic in `signin/page.tsx`
- [ ] Use a server cookie to detect a recent logout
- [ ] Add a conditional `prompt=login` after logout (see the sketch below)
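A sketch of the conditional `prompt=login`, assuming the `/api/auth/session-status` route from Problem 6 above exposes a `shouldForceLogin` flag; the third argument of NextAuth's `signIn()` is forwarded to Keycloak as extra authorization parameters:
```typescript
import { signIn } from "next-auth/react";

// Sketch: only force the Keycloak credentials prompt when a recent logout was detected.
async function loginWithConditionalPrompt() {
  const res = await fetch('/api/auth/session-status');
  const { shouldForceLogin } = await res.json();
  await signIn(
    "keycloak",
    { callbackUrl: "/" },
    // Extra authorization params appended to the Keycloak /auth request
    shouldForceLogin ? { prompt: "login" } : undefined
  );
}
```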
### Step 4: Tests
- [ ] Test normal login (first visit)
- [ ] Test login after logout (must ask for credentials)
- [ ] Test logout from the dashboard
- [ ] Test logout from an iframe
- [ ] Test session expiration
- [ ] Test an expired refresh token
---
## 🔧 Recommended Keycloak Configuration
### OAuth Client Settings
1. **Valid Redirect URIs**:
```
http://localhost:3000/api/auth/callback/keycloak
https://your-domain.com/api/auth/callback/keycloak
```
2. **Web Origins** :
```
http://localhost:3000
https://your-domain.com
```
3. **Post Logout Redirect URIs** :
```
http://localhost:3000/signin?logout=true
https://your-domain.com/signin?logout=true
```
4. **Access Token Lifespan**: 5 minutes (recommended)
5. **SSO Session Idle**: 30 minutes
6. **SSO Session Max**: 10 hours
### Realm Configuration
1. **Cookies**:
- SameSite: `None` (if cross-domain)
- Secure: `true` (if HTTPS)
- Domain: shared parent domain (if cross-domain)
2. **Session Management**:
- Enable SSO Session Idle: `true`
- SSO Session Idle Timeout: 30 minutes
- SSO Session Max Lifespan: 10 hours
---
## 📊 Success Metrics
After implementation, verify that:
1. ✅ **First-visit login**: SSO works (no prompt if a Keycloak session exists)
2. ✅ **Login after logout**: the credentials prompt is shown
3. ✅ **Dashboard logout**: all sessions are removed
4. ✅ **Iframe logout**: the dashboard logs out automatically
5. ✅ **Session expiration**: clean redirect to signin
6. ✅ **Expired refresh token**: redirect to signin with an appropriate message
---
## 🎯 Summary of Improvements
| Problem | Solution | Priority | Impacted Files |
|----------|----------|----------|-------------------|
| `prompt=login` always active | Handle it conditionally | High | `options.ts`, `signin/page.tsx` |
| Complex invalid-session detection | Server cookie + API route | High | `signin/page.tsx`, `end-sso-session/route.ts` |
| Keycloak cookies cannot be cleared | Improve logout URL + Admin API | Medium | `signout-handler.tsx`, `session.ts` |
| Logout/login race condition | HttpOnly flag cookie | Medium | `signout-handler.tsx`, `signin/page.tsx` |
| Cookie config not explicit | Explicit configuration | High | `options.ts` |
| Refresh error handling | Improve error detection | High | `options.ts` |
---
**Document created on**: $(date)
**Last updated**: Recommendations for improving the login flow

View File

@ -1,232 +0,0 @@
# Inactivity Timeout and Logout Analysis
## Issue 1: Dashboard Should Disconnect After 30 Minutes of Inactivity
### Current State
**Session Configuration** (`app/api/auth/options.ts:190`):
```typescript
session: {
strategy: "jwt",
maxAge: 30 * 24 * 60 * 60, // 30 days
}
```
**SessionProvider Configuration** (`components/providers.tsx`):
```typescript
<SessionProvider>
{children}
</SessionProvider>
```
### Problem Analysis
1. **No Inactivity Detection**:
- NextAuth session is set to 30 days maximum
- No client-side inactivity timeout logic exists
- No activity tracking (mouse movements, clicks, keyboard input)
- SessionProvider doesn't have `refetchInterval` configured
2. **How NextAuth Sessions Work**:
- NextAuth sessions are JWT-based (stateless)
- Session validity is checked on each request to `/api/auth/session`
- No automatic expiration based on inactivity
- Session expires only when `maxAge` is reached (30 days in your case)
3. **What's Missing**:
- Client-side activity monitoring
- Automatic session invalidation after inactivity period
- Session refresh based on activity (not just time)
### Root Cause
**NextAuth doesn't track user activity** - it only tracks session age. The session will remain valid for 30 days regardless of whether the user is active or not.
### Solution Requirements
To implement 30-minute inactivity timeout, you need:
1. **Client-Side Activity Tracking**:
- Monitor user activity (mouse, keyboard, clicks)
- Track last activity timestamp
- Store in `sessionStorage` or `localStorage`
2. **Session Invalidation Logic**:
- Check inactivity period on each page interaction
- Call `signOut()` if inactivity exceeds 30 minutes
- Clear NextAuth session and Keycloak session
3. **Activity Reset on User Actions**:
- Reset inactivity timer on any user interaction
- Update last activity timestamp
4. **SessionProvider Configuration**:
- Optionally configure `refetchInterval` to check session periodically
- But this won't help with inactivity - it only refreshes the session
### Implementation Approach
The inactivity timeout must be implemented **client-side** because:
- NextAuth sessions are stateless (JWT)
- Server doesn't know about user activity
- Activity tracking requires browser events
**Recommended Implementation** (a sketch follows the list):
1. Create an `InactivityHandler` component
2. Monitor user activity events (mousemove, keydown, click, scroll)
3. Store last activity time in `sessionStorage`
4. Check inactivity every minute (or on page focus)
5. If inactivity > 30 minutes, trigger logout
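A minimal sketch of such an `InactivityHandler`, assuming a 30-minute threshold, a `last_activity_at` key in `sessionStorage`, and that a plain NextAuth `signOut()` is acceptable here (the full Keycloak logout flow from the existing signout handler could be triggered instead):
```tsx
"use client";
import { useEffect } from "react";
import { signOut } from "next-auth/react";

const INACTIVITY_LIMIT_MS = 30 * 60 * 1000; // 30 minutes
const STORAGE_KEY = "last_activity_at";     // assumed key, not existing code

export function InactivityHandler() {
  useEffect(() => {
    const markActivity = () =>
      sessionStorage.setItem(STORAGE_KEY, Date.now().toString());

    const checkInactivity = () => {
      const last = Number(sessionStorage.getItem(STORAGE_KEY) ?? Date.now());
      if (Date.now() - last > INACTIVITY_LIMIT_MS) {
        // Inactivity exceeded: end the NextAuth session and return to signin
        signOut({ callbackUrl: "/signin?logout=true" });
      }
    };

    markActivity();
    const events = ["mousemove", "keydown", "click", "scroll"] as const;
    events.forEach((e) => window.addEventListener(e, markActivity));
    const interval = setInterval(checkInactivity, 60 * 1000); // check every minute
    window.addEventListener("focus", checkInactivity);

    return () => {
      events.forEach((e) => window.removeEventListener(e, markActivity));
      clearInterval(interval);
      window.removeEventListener("focus", checkInactivity);
    };
  }, []);

  return null; // renders nothing; mount it once near the root layout
}
```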
---
## Issue 2: Applications Outside Dashboard Still Connected After Logout
### Current Implementation
**Logout Flow** (`components/main-nav.tsx`, `components/auth/signout-handler.tsx`):
1. Clear NextAuth cookies
2. Clear Keycloak cookies (client-side attempt)
3. Call `/api/auth/end-sso-session` (NEW)
- Uses Keycloak Admin API: `adminClient.users.logout({ id: userId })`
4. Sign out from NextAuth
5. Redirect to Keycloak logout endpoint with `id_token_hint`
### Problem Analysis
**Why Applications Are Still Connected:**
1. **Keycloak Admin API `users.logout()` Behavior**:
- The method `adminClient.users.logout({ id: userId })` **logs out the user from all client sessions**
- However, it may **NOT clear the SSO session cookie** (`KEYCLOAK_SESSION`)
- The SSO session cookie is what allows applications to auto-authenticate
2. **SSO Session vs Client Sessions**:
- **Client Sessions**: Per OAuth client (dashboard, app1, app2, etc.)
- **SSO Session**: Realm-wide, shared across all clients
- `users.logout()` clears client sessions but may leave SSO session active
- Applications check for SSO session cookie, not client sessions
3. **Cookie Domain/Path Issues**:
- Keycloak cookies are set on Keycloak's domain
- Client-side `clearKeycloakCookies()` may not work if:
- Cookies are `HttpOnly` (can't be cleared from JavaScript)
- Cookies are on different domain (cross-domain restrictions)
- Cookies have different path/domain settings
4. **Logout Endpoint Behavior**:
- Keycloak logout endpoint (`/protocol/openid-connect/logout`) with `id_token_hint`:
- Clears the **client session** for that specific OAuth client
- May clear SSO session **only if it's the last client session**
- If other applications have active sessions, SSO session persists
### Root Cause
**The SSO session cookie persists** because:
1. `users.logout()` Admin API method clears client sessions but may not clear SSO session cookie
2. Keycloak logout endpoint only clears SSO session if it's the last client session
3. If other applications have active sessions, the SSO session remains valid
4. Applications check for SSO session cookie, not client sessions
### Why This Happens
**Keycloak's SSO Design**:
- SSO session is designed to persist across client logouts
- This allows users to stay logged in across multiple applications
- Logging out from one application shouldn't log out from all applications
- This is **by design** for SSO functionality
**However**, when you want **global logout**, you need to:
1. Clear the SSO session cookie explicitly
2. Or ensure all client sessions are logged out first
3. Or use Keycloak's Single Logout (SLO) feature
### Solution Requirements
To ensure applications are logged out:
1. **Keycloak Configuration** (Server-Side):
- Enable **Front-Channel Logout** for all clients
- Configure **Back-Channel Logout URLs** for each client
- This allows Keycloak to notify all applications when logout occurs
2. **Admin API Limitations**:
- `users.logout()` may not clear SSO session cookie
- Need to use Keycloak's logout endpoint with proper parameters
- Or use Keycloak Admin API to end SSO session directly (if available)
3. **Alternative Approach**:
- Use Keycloak's **Single Logout (SLO)** feature
- Configure all clients to participate in SLO
- When one client logs out, all clients are notified
### What's Actually Happening
When you call `/api/auth/end-sso-session`:
1. ✅ Admin API `users.logout()` is called
2. ✅ All client sessions are logged out
3. ❌ SSO session cookie may still exist
4. ❌ Applications check SSO session cookie → still authenticated
When you redirect to Keycloak logout endpoint:
1. ✅ Dashboard client session is cleared
2. ✅ If it's the last client session, SSO session is cleared
3. ❌ If other applications have active sessions, SSO session persists
4. ❌ Applications can still authenticate using SSO session cookie
### Verification Steps
To verify why applications are still connected:
1. **Check if Admin API call succeeds**:
- Look for console logs: "Successfully ended SSO session for user: {userId}"
- Check for errors in `/api/auth/end-sso-session` endpoint
2. **Check Keycloak session cookies**:
- After logout, check browser cookies for:
- `KEYCLOAK_SESSION`
- `KEYCLOAK_SESSION_LEGACY`
- `KEYCLOAK_IDENTITY`
- If these cookies still exist, SSO session is still active
3. **Check if other applications have active sessions**:
- If other applications are open in other tabs/windows
- They may have active client sessions
- This prevents SSO session from being cleared
4. **Check Keycloak Admin Console**:
- Navigate to: Users → [User] → Sessions
- Check if sessions are actually cleared
- Verify SSO session status
### Recommended Solutions
**Option 1: Keycloak Configuration (Recommended)**
- Enable Front-Channel Logout for all clients
- Configure Back-Channel Logout URLs
- This ensures all applications are notified of logout
**Option 2: Clear SSO Session Cookie Explicitly**
- After Admin API logout, redirect to Keycloak logout endpoint
- Use `kc_action=LOGOUT` parameter (already implemented)
- Ensure all client sessions are logged out first
**Option 3: Use Keycloak Single Logout (SLO)**
- Configure all clients to participate in SLO
- When dashboard logs out, all clients are automatically logged out
- Requires Keycloak configuration changes
---
## Summary
### Issue 1: 30-Minute Inactivity Timeout
- **Status**: Not implemented
- **Reason**: NextAuth doesn't track activity, only session age
- **Solution**: Client-side activity tracking + automatic logout
### Issue 2: Applications Still Connected
- **Status**: Partially working
- **Reason**: SSO session cookie persists even after client sessions are cleared
- **Solution**: Keycloak configuration (Front-Channel Logout) or SLO

View File

@ -1,224 +0,0 @@
# 502 Error Investigation - Redirect from Keycloak
## 🔍 Problem Identified
**Symptom**: 502 error after being redirected back from Keycloak following authentication
**Observed logs**:
```
Keycloak profile callback: {
rawProfile: { ... },
rawRoles: undefined,
realmAccess: undefined, // ⚠️ PROBLEM HERE
groups: [ ... ] // ✅ Groups are present
}
Profile callback raw roles: []
Profile callback cleaned roles: []
```
## 🎯 Root Cause
The Keycloak profile (ID token) **does not contain** `realm_access.roles`, but it does contain `groups`. The roles are most likely in the **access token**, not in the ID token.
## ✅ Fix Applied
**File**: `app/api/auth/options.ts` - JWT callback
**Changes**:
1. ✅ Extract the roles from the **access token** (not only from the profile)
2. ✅ Fall back to `groups` if there are no roles
3. ✅ Improved logs for debugging
**Modified code** (a sketch of the extraction follows the block):
```typescript
// Before: only from the profile
const roles = keycloakProfile.realm_access?.roles || [];
// After: multiple sources
// 1. Try the profile (ID token) first
// 2. If empty, decode the access token
// 3. If still empty, use groups as a fallback
```
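A sketch of that multi-source extraction, assuming the roles live under the standard `realm_access.roles` claim of the access token; the payload is base64-decoded only to read claims, no signature verification is implied:
```typescript
// Sketch: resolve roles from the profile, then the access token, then groups.
function extractRoles(keycloakProfile: any, accessToken?: string): string[] {
  // 1. Try the profile (ID token) first
  let roles: string[] = keycloakProfile?.realm_access?.roles ?? [];

  // 2. If empty, decode the access token payload and read realm_access.roles
  if (roles.length === 0 && accessToken) {
    try {
      const payload = JSON.parse(
        Buffer.from(accessToken.split(".")[1], "base64").toString("utf8")
      );
      roles = payload?.realm_access?.roles ?? [];
    } catch {
      // Malformed token: keep roles empty and fall through to the next source
    }
  }

  // 3. Last resort: fall back to groups
  if (roles.length === 0) {
    roles = keycloakProfile?.groups ?? [];
  }
  return roles;
}
```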
## 🔍 Investigation Points for the 502 Error
### 1. Check the full Next.js server logs
**Where to look**:
- Next.js terminal (full errors)
- Production logs (if deployed)
- Browser console (client-side errors)
**Useful commands**:
```bash
# Show all logs
npm run dev 2>&1 | tee logs.txt
# Filter errors
npm run dev 2>&1 | grep -i "error\|502\|exception"
```
### 2. Check the NextAuth callback
**File**: `app/api/auth/callback/keycloak` (handled by NextAuth)
**Points to verify**:
- ✅ The callback receives the OAuth code
- ✅ The code → tokens exchange works
- ✅ The JWT callback runs without errors
- ✅ The session callback runs without errors
**Add logs**:
```typescript
// In options.ts, jwt callback
console.log('JWT callback - account:', !!account, 'profile:', !!profile);
console.log('JWT callback - accessToken length:', account?.access_token?.length);
console.log('JWT callback - roles extracted:', cleanRoles);
```
### 3. Check the storage initialization
**File**: `app/api/storage/init/route.ts`
**Potential problem**:
- If `createUserFolderStructure()` fails, it can cause a 502
- If the session is not fully created yet
**Checks**:
```typescript
// Check whether the error comes from here
console.log('Storage init - session:', session);
console.log('Storage init - user id:', session?.user?.id);
```
### 4. Check the Keycloak configuration
**Potential problem**:
- The roles are not mapped correctly in Keycloak
- The "roles" scope is not configured correctly
- The token mappers are not configured
**To verify in Keycloak**:
1. **Client Configuration**:
- Is the "roles" scope enabled?
- Is the "realm roles" mapper configured?
2. **Token Mappers**:
- Does a `realm_access.roles` mapper exist?
- Is it added to the access token?
3. **User Roles**:
- Does the user have roles assigned in the realm?
### 5. Check for errors in the session callback
**File**: `app/api/auth/options.ts` - session callback
**Potential problem**:
- If `token.role` is undefined and some code expects an array
- If `session.user` is malformed
**Checks**:
```typescript
// In the session callback
console.log('Session callback - token.role:', token.role);
console.log('Session callback - token.error:', token.error);
console.log('Session callback - hasAccessToken:', !!token.accessToken);
```
### 6. Check the timeouts
**Potential problem**:
- Timeout while calling Keycloak to exchange the code
- Timeout during storage initialization
- Timeout while creating the session
**Solutions**:
- Increase the timeouts if necessary
- Check the network latency to Keycloak
## 🛠️ Immediate Actions
### Action 1: Add more logs
**File**: `app/api/auth/options.ts`
```typescript
// In the jwt callback, after extracting the roles
console.log('=== JWT CALLBACK DEBUG ===');
console.log('Has account:', !!account);
console.log('Has profile:', !!profile);
console.log('Access token present:', !!account?.access_token);
console.log('Roles from profile:', keycloakProfile.realm_access?.roles);
console.log('Roles from access token:', roles);
console.log('Final roles:', cleanRoles);
console.log('==========================');
```
### Action 2: Identify the exact error
**In the Next.js terminal**, look for:
- The full stack trace
- The exact error message
- The line of code that triggers the error
### Action 3: Test with a simple user
**Test with**:
- A user who has roles
- A user without roles
- Check whether the error is related to the roles
### Action 4: Check the Keycloak configuration
**In the Keycloak Admin Console**:
1. **Client → Mappers**:
- Check that a "realm roles" mapper exists
- Check that it is added to the access token
- Check that it adds `realm_access.roles`
2. **Client → Settings**:
- Check that "Full Scope Allowed" is enabled
- Check that the "Default Client Scopes" include "roles"
3. **Realm → Roles**:
- Check that the user has roles assigned
## 📊 Debugging Checklist
- [ ] Full Next.js server logs reviewed
- [ ] Exact error identified (stack trace)
- [ ] JWT callback runs without errors
- [ ] Session callback runs without errors
- [ ] Roles extracted correctly (from the access token)
- [ ] Storage init works
- [ ] Keycloak configuration verified
- [ ] Keycloak mappers verified
- [ ] Timeouts verified
## 🔧 Temporary Workaround (if needed)
If the error persists, the `groups` can temporarily be used as roles:
```typescript
// In the profile callback
const roles = keycloakProfile.realm_access?.roles ||
keycloakProfile.groups ||
[];
```
**Note**: this is only a temporary workaround. The Keycloak configuration must be fixed so that the roles are mapped correctly.
## 📝 Next Steps
1. ✅ **Fix applied**: roles extracted from the access token
2. ⏳ **To do**: check the server logs for the exact error
3. ⏳ **To do**: check the Keycloak configuration
4. ⏳ **To do**: test after the fix
---
**Document created on**: $(date)
**Status**: fix applied, investigation in progress

View File

@ -1,123 +0,0 @@
# Keycloak Session Synchronization Fix
## Problem
When a user is still logged into the NextAuth dashboard (session valid for 30 days), but Keycloak session cookies have expired (typically 30 minutes to a few hours), iframe applications can't authenticate because they rely on Keycloak cookies for SSO.
**Symptoms**:
- User is logged into dashboard
- Iframe applications ask for Keycloak login again
- NextAuth session is still valid, but Keycloak cookies expired
## Root Cause
**Session Mismatch**:
- **NextAuth Session**: 30 days (JWT-based, stored in encrypted cookie)
- **Keycloak Session Cookies**: Typically 30 minutes to a few hours (set by Keycloak server)
- **Iframe Applications**: Rely on Keycloak session cookies for SSO, not NextAuth tokens
When Keycloak session cookies expire, iframe applications can't authenticate even though NextAuth session is still valid.
## Solution Implemented
### 1. Session Refresh API Endpoint
Created `/api/auth/refresh-keycloak-session` (sketched below) that:
- Uses the refresh token to get new Keycloak tokens
- Ensures tokens are fresh before loading iframes
- Helps maintain token synchronization
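A sketch of what this endpoint can look like (not a copy of the actual route); it assumes the session exposes `refreshToken`, as described under "Exposed Refresh Token in Session" below, and that `authOptions` is importable from `../options`:
```typescript
import { NextResponse } from 'next/server';
import { getServerSession } from 'next-auth/next';
import { authOptions } from '../options'; // assumed relative path

export async function POST() {
  const session = await getServerSession(authOptions);
  if (!session?.refreshToken) {
    return NextResponse.json({ refreshed: false }, { status: 401 });
  }

  // Standard OAuth2 refresh_token grant against the Keycloak token endpoint
  const res = await fetch(
    `${process.env.NEXT_PUBLIC_KEYCLOAK_ISSUER}/protocol/openid-connect/token`,
    {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: new URLSearchParams({
        grant_type: 'refresh_token',
        refresh_token: session.refreshToken,
        client_id: process.env.KEYCLOAK_CLIENT_ID!,
        client_secret: process.env.KEYCLOAK_CLIENT_SECRET!,
      }),
    }
  );

  return NextResponse.json({ refreshed: res.ok });
}
```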
### 2. Automatic Session Refresh Before Iframe Load
Updated `ResponsiveIframe` component to:
- Automatically refresh the session before loading iframe applications
- Show a loading indicator during refresh
- Ensure tokens are fresh when iframes load
### 3. Exposed Refresh Token in Session
- Added `refreshToken` to session object
- Allows API endpoints to refresh tokens when needed
## Files Modified
1. **`app/api/auth/refresh-keycloak-session/route.ts`** (NEW)
- API endpoint to refresh Keycloak tokens
- Uses refresh token to get new access tokens
2. **`app/components/responsive-iframe.tsx`**
- Automatically refreshes session before loading iframe
- Shows loading indicator during refresh
3. **`app/api/auth/options.ts`**
- Exposes `refreshToken` in session object
4. **`types/next-auth.d.ts`**
- Added `refreshToken` to Session interface
## Limitations
**Important**: This solution refreshes OAuth tokens, but **Keycloak session cookies are separate** and are set by Keycloak when the user authenticates via the browser. Refreshing OAuth tokens doesn't automatically refresh Keycloak session cookies.
### Why This Happens
Keycloak maintains two separate sessions:
1. **OAuth Token Session**: Managed via refresh tokens (what we refresh)
2. **Browser Session Cookies**: Set by Keycloak during login, expire based on Keycloak's session timeout settings
### Recommended Solutions
#### Option 1: Configure Keycloak Session Timeout (Recommended)
Increase Keycloak's SSO session timeout to match or exceed NextAuth's 30-day session:
1. Go to Keycloak Admin Console
2. Navigate to: Realm Settings → Sessions
3. Set **SSO Session Idle** to match your needs (e.g., 30 days)
4. Set **SSO Session Max** to match (e.g., 30 days)
This ensures Keycloak cookies don't expire before NextAuth session.
#### Option 2: Pass Access Token to Iframe Applications
If iframe applications support token-based authentication (see the sketch after this list):
- Pass `accessToken` via URL parameter: `?token=${accessToken}`
- Or use `postMessage` to send token to iframe
- Iframe applications can then use the token for authentication
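A sketch of the `postMessage` variant, assuming a message shape of `{ type, token }` and that the iframe application implements a matching listener; never use `'*'` as the target origin when sending tokens:
```typescript
// Sketch: send the current access token to an already-loaded iframe.
function sendTokenToIframe(
  iframe: HTMLIFrameElement,
  accessToken: string,
  targetOrigin: string // the iframe app's exact origin
) {
  iframe.contentWindow?.postMessage({ type: 'ACCESS_TOKEN', token: accessToken }, targetOrigin);
}
```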
#### Option 3: Periodic Session Refresh
Implement a periodic refresh mechanism that:
- Checks session validity every 15-20 minutes
- Refreshes tokens proactively
- May help keep Keycloak session active
## Testing
1. Log in to dashboard
2. Wait for Keycloak session to expire (or manually clear Keycloak cookies)
3. Navigate to an iframe application
4. Session should be refreshed automatically
5. Iframe should load without requiring login
## Environment Variables Required
```bash
NEXT_PUBLIC_KEYCLOAK_ISSUER=https://keycloak.example.com/realms/neah
KEYCLOAK_CLIENT_ID=neah-dashboard
KEYCLOAK_CLIENT_SECRET=<secret>
```
## Future Improvements
1. **Implement invisible iframe to Keycloak**: Use Keycloak's check-session-iframe to refresh cookies
2. **Token passing**: Pass access tokens to iframe applications if they support it
3. **Proactive refresh**: Implement periodic token refresh to prevent expiration
4. **Session monitoring**: Monitor Keycloak session status and refresh proactively
---
**Date**: 2024
**Status**: ✅ Implemented (with limitations)
**Version**: 1.0

View File

@ -1,157 +0,0 @@
# Leantime API Fixes - Mark Notifications as Read
**Date**: 2026-01-01
**Issue**: Mark all as read failing due to incorrect API method names
**Status**: ✅ Fixed
---
## 🔍 Issues Found
### Issue 1: Incorrect Method Name for Single Notification
**Current Code** (WRONG):
```typescript
method: 'leantime.rpc.Notifications.Notifications.markNotificationAsRead'
params: {
userId: leantimeUserId,
notificationId: parseInt(sourceId) // Wrong parameter name
}
```
**Leantime Documentation** (CORRECT):
```typescript
method: 'leantime.rpc.Notifications.Notifications.markNotificationRead' // No "As" in method name
params: {
id: parseInt(sourceId), // Parameter is "id", not "notificationId"
userId: leantimeUserId
}
```
**Fix Applied**: ✅ Changed method name and parameter names to match Leantime API
---
### Issue 2: No "Mark All" Method Exists
**Problem**:
- Leantime API does NOT have a `markAllNotificationsAsRead` method
- Current code tries to call a non-existent method
**Solution**:
- Fetch all unread notifications
- Mark each one individually using `markNotificationRead`
- Process in parallel for better performance
**Fix Applied**: ✅ Implemented loop-based approach to mark all notifications individually
---
## ✅ Changes Made
### 1. Fixed `markAsRead` Method
**File**: `lib/services/notifications/leantime-adapter.ts`
**Changes**:
- ✅ Method name: `markNotificationAsRead``markNotificationRead`
- ✅ Parameter: `notificationId``id`
- ✅ Parameter order: `id` first, then `userId` (matching Leantime docs)
- ✅ Added request logging
---
### 2. Fixed `markAllAsRead` Method
**File**: `lib/services/notifications/leantime-adapter.ts`
**New Implementation** (sketched after the list):
1. Fetch all unread notifications (up to 1000)
2. Filter to get only unread ones
3. Mark each notification individually using `markNotificationRead`
4. Process in parallel using `Promise.all()`
5. Return success if majority succeed
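A minimal sketch of this loop, reusing the method name and `{ id, userId }` parameters quoted from the Leantime docs above; the JSON-RPC endpoint path and the `x-api-key` header are assumptions about the deployment:
```typescript
// Sketch: mark every unread notification individually, in parallel.
async function markAllNotificationsRead(unreadIds: number[], leantimeUserId: number): Promise<boolean> {
  const results = await Promise.all(
    unreadIds.map(async (id) => {
      const res = await fetch(`${process.env.LEANTIME_URL}/api/jsonrpc`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'x-api-key': process.env.LEANTIME_API_KEY ?? '',
        },
        body: JSON.stringify({
          jsonrpc: '2.0',
          method: 'leantime.rpc.Notifications.Notifications.markNotificationRead',
          params: { id, userId: leantimeUserId },
          id: 1,
        }),
      });
      return res.ok;
    })
  );
  const succeeded = results.filter(Boolean).length;
  // Treat the operation as successful when the majority of calls succeed
  return succeeded >= Math.ceil(results.length / 2);
}
```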
**Benefits**:
- ✅ Works with actual Leantime API
- ✅ Handles partial failures gracefully
- ✅ Parallel processing for better performance
- ✅ Detailed logging for each notification
---
## 📊 Expected Behavior After Fix
### Mark Single Notification as Read
**Before**: ❌ Failed (wrong method name)
**After**: ✅ Should work correctly
**Logs**:
```
[LEANTIME_ADAPTER] markAsRead - Request body: {"method":"markNotificationRead",...}
[LEANTIME_ADAPTER] markAsRead - Success: true
```
---
### Mark All Notifications as Read
**Before**: ❌ Failed (method doesn't exist)
**After**: ✅ Should work (marks each individually)
**Logs**:
```
[LEANTIME_ADAPTER] markAllAsRead - Fetching all unread notifications
[LEANTIME_ADAPTER] markAllAsRead - Found 66 unread notifications to mark
[LEANTIME_ADAPTER] markAllAsRead - Results: 66 succeeded, 0 failed out of 66 total
[LEANTIME_ADAPTER] markAllAsRead - Overall success: true
```
---
## 🎯 Count vs Display Issue
**Current Situation**:
- Count: 66 unread (from first 100 notifications)
- Display: 10 notifications shown (pagination)
**Why**:
- `getNotificationCount()` fetches first 100 notifications and counts unread
- `getNotifications()` with default limit=20 shows first 10-20
- This is expected behavior but can be confusing
**Options**:
1. **Accept limitation**: Document that count is based on first 100
2. **Fetch all for count**: More accurate but slower
3. **Use dedicated count API**: If Leantime provides one
4. **Show "66+ unread"**: If count reaches 100, indicate there may be more
**Recommendation**: Keep current behavior but add a note in UI if count = 100 (may have more)
---
## 🚀 Next Steps
1. ✅ **Test Mark Single as Read**: Should work now with correct method name
2. ✅ **Test Mark All as Read**: Should work by marking each individually
3. ⏳ **Verify Count Updates**: After marking, count should decrease
4. ⏳ **Monitor Performance**: Marking 66 notifications individually may take a few seconds
---
## 📝 Summary
**Fixes Applied**:
1. ✅ Fixed `markAsRead` method name and parameters
2. ✅ Implemented `markAllAsRead` using individual marking approach
3. ✅ Added comprehensive logging
**Status**: Ready for testing after `rm -rf .next && npm run build`
**Expected Result**: Mark all as read should now work correctly
---
**Generated**: 2026-01-01

View File

@ -1,646 +0,0 @@
# Full Audit of the Login/Logout Files - Cookie Analysis
## 📋 Overview
This document lists **ALL** the code files involved in the **login** and **logout** process of the Next.js dashboard with NextAuth and Keycloak, with an in-depth analysis of cookie handling.
---
## 🔐 CORE FILES - NextAuth Configuration
### 1. **`app/api/auth/[...nextauth]/route.ts`**
**Role**: NextAuth route handler for all authentication endpoints
**Cookies handled**:
- `next-auth.session-token` (or secure variants) - main NextAuth session cookie
- `next-auth.csrf-token` - CSRF token for security
- `next-auth.state` - OAuth state for the Keycloak flow
- `next-auth.callback-url` - callback URL after authentication
**Functions**:
- Handles `GET/POST /api/auth/signin` → redirects to Keycloak
- Handles `GET/POST /api/auth/signout` → signs out and cleans up cookies
- Handles `GET /api/auth/session` → reads the session cookie
- Handles `GET /api/auth/callback/keycloak` → receives the OAuth code from Keycloak
- Handles `GET /api/auth/csrf` → generates the CSRF token
- Handles `GET /api/auth/providers` → lists the available providers
**Cookies created/removed**:
- **Login**: creates `next-auth.session-token` (HttpOnly, Secure, SameSite=Lax)
- **Logout**: removes `next-auth.session-token` via `signOut()`
---
### 2. **`app/api/auth/options.ts`** ⭐ **CRITICAL FILE**
**Role**: main NextAuth configuration with Keycloak
**Cookies handled**:
- All NextAuth cookies (via the implicit configuration)
- The Keycloak tokens are stored inside the JWT (no separate cookies)
**Key functions**:
#### `refreshAccessToken(token)` (lines 83-139)
- **Cookies used**: none directly, but uses the `refreshToken` stored in the JWT
- **Behavior**:
- Calls the Keycloak `/token` endpoint to refresh
- Detects whether the Keycloak session is invalid (`invalid_grant` error)
- Returns `error: "SessionNotActive"` if the Keycloak session has expired
#### `jwt` callback (lines 196-282)
- **Cookies used**: reads `next-auth.session-token` (decrypted by NextAuth)
- **Behavior**:
- **Initial login**: stores `accessToken`, `refreshToken`, `idToken` in the JWT
- **Subsequent requests**: checks expiration, refreshes if needed
- **Token expired**: calls `refreshAccessToken()`
- **Session invalidated**: returns a token with `error: "SessionNotActive"`
#### `session` callback (lines 283-324)
- **Cookies used**: reads the JWT from `next-auth.session-token`
- **Behavior**:
- If `token.error === "SessionNotActive"` → returns `null` (forces logout)
- Otherwise, builds the session with the user data
**Cookie configuration**:
```typescript
session: {
strategy: "jwt",
maxAge: 4 * 60 * 60, // 4 hours
}
// The cookies are managed automatically by NextAuth
// No explicit cookie configuration in this file
```
**OAuth parameters**:
```typescript
authorization: {
params: {
scope: "openid profile email roles",
prompt: "login" // Force le prompt de login même si SSO existe
}
}
```
---
## 🚪 PAGE FILES - User Interface
### 3. **`app/signin/page.tsx`** ⭐ **CRITICAL FILE**
**Role**: sign-in page with complex logout-detection logic
**Cookies inspected**:
- `next-auth.session-token` (or variants) - checks whether the cookie exists but is invalid
- `logout_in_progress` - temporary cookie (60s) marking a logout in progress
- Keycloak cookies (via `document.cookie`)
**Key functions**:
#### Logout / invalid-session detection (lines 17-67)
```typescript
// Check the NextAuth cookies
const hasInvalidSessionCookie = document.cookie
.split(';')
.some(c => c.trim().startsWith('next-auth.session-token=') ||
c.trim().startsWith('__Secure-next-auth.session-token=') ||
c.trim().startsWith('__Host-next-auth.session-token='));
// If the cookie exists but status = unauthenticated → the session was invalidated
if (status === 'unauthenticated' && hasInvalidSessionCookie) {
sessionStorage.setItem('session_invalidated', 'true');
// Prevents auto-login
}
```
#### Auto-login (lines 69-124)
- **Condition**: only if **NO** session cookie already exists
- **Behavior**: calls `signIn("keycloak")` after 1 second
- **Protection**: does not run if `logout_in_progress` or `session_invalidated` is set
#### Storage initialization (lines 126-158)
- Calls `/api/storage/init` after a successful authentication
- Forces a reload to update the session
**Cookies created/removed**:
- **No cookie created directly** (NextAuth handles that)
- **Removes**: `sessionStorage` items (`just_logged_out`, `session_invalidated`)
---
### 4. **`app/signout/page.tsx`**
**Role**: sign-out page (simple wrapper)
**Cookies**: no direct manipulation, delegates to `SignOutHandler`
---
## 🔧 COMPONENT FILES - Business Logic
### 5. **`components/auth/signout-handler.tsx`** ⭐ **CRITICAL FILE**
**Role**: handles the complete sign-out (NextAuth + Keycloak)
**Cookies manipulated**:
#### NextAuth cookies (line 23)
```typescript
clearAuthCookies(); // Removes next-auth.session-token
```
#### Keycloak cookies (line 25)
```typescript
clearKeycloakCookies(); // Tries to remove KEYCLOAK_SESSION, etc.
```
#### Flag cookie (line 16)
```typescript
document.cookie = 'logout_in_progress=true; path=/; max-age=60';
```
**Logout flow**:
1. Marks the logout as in progress (`sessionStorage` + cookie)
2. **Removes the NextAuth cookies** (`clearAuthCookies()`)
3. **Tries to remove the Keycloak cookies** (`clearKeycloakCookies()`)
4. Calls `/api/auth/end-sso-session` (Keycloak Admin API)
5. Calls NextAuth `signOut()` (removes the server-side cookie)
6. Redirects to the Keycloak logout endpoint with `id_token_hint`
7. Keycloak redirects back to `/signin?logout=true`
**Cookies removed**:
- `next-auth.session-token` (and variants)
- `KEYCLOAK_SESSION`, `KEYCLOAK_IDENTITY`, `AUTH_SESSION_ID` (if same domain)
- `logout_in_progress` (expires after 60s)
---
### 6. **`components/main-nav.tsx`** (lines 364-446)
**Role**: sign-out button in the navigation
**Cookies**: same logic as `signout-handler.tsx`
- Calls `clearAuthCookies()` and `clearKeycloakCookies()`
- Creates the `logout_in_progress` cookie
- Same flow as `SignOutHandler`
---
### 7. **`components/layout/layout-wrapper.tsx`** ⭐ **CRITICAL FILE**
**Role**: listens for logout messages coming from the iframes
**Cookies manipulated**:
- Same pattern as `signout-handler.tsx`
- Handles logouts triggered by iframes via `postMessage`
**Key function** (lines 23-112):
```typescript
const handleMessage = async (event: MessageEvent) => {
if (event.data.type === 'KEYCLOAK_LOGOUT' || event.data.type === 'LOGOUT') {
// Same flow as signout-handler.tsx
clearAuthCookies();
clearKeycloakCookies();
// ... full logout
}
};
```
**Cookies**: identical to `signout-handler.tsx`
---
### 8. **`components/auth/auth-check.tsx`**
**Role**: client-side authentication guard
**Cookies**: no direct manipulation
- Uses `useSession()`, which reads `next-auth.session-token`
- Redirects to `/signin` if `status === "unauthenticated"`
---
### 9. **`components/providers.tsx`**
**Role**: `SessionProvider` wrapper for NextAuth
**Cookies**: no manipulation, provides the session context
---
## 🛠️ UTILITY FILES - Session/Cookie Handling
### 10. **`lib/session.ts`** ⭐ **CRITICAL FILE**
**Role**: utilities for managing cookies and sessions
#### `clearAuthCookies()` (lines 93-108)
**Cookies removed**:
```typescript
// Removes ONLY the session cookies, NOT the OAuth cookies
if (cookieName.startsWith('next-auth.session-token') ||
cookieName.startsWith('__Secure-next-auth.session-token') ||
cookieName.startsWith('__Host-next-auth.session-token')) {
document.cookie = `${cookieName}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
}
```
**Important**: does **NOT** remove:
- `next-auth.csrf-token` (needed for OAuth)
- `next-auth.state` (needed for the OAuth flow)
- `next-auth.callback-url`
#### `clearKeycloakCookies()` (lines 115-154)
**Keycloak cookies it tries to remove**:
```typescript
const keycloakCookieNames = [
'KEYCLOAK_SESSION',
'KEYCLOAK_SESSION_LEGACY',
'KEYCLOAK_IDENTITY',
'KEYCLOAK_IDENTITY_LEGACY',
'AUTH_SESSION_ID',
'KC_RESTART',
'KC_RESTART_LEGACY'
];
```
**Limitation**: these cookies live on the Keycloak domain, so they **cannot be removed** from the dashboard's origin (browser same-origin rules). This function tries several domain/path combinations but will fail if Keycloak is on a different domain; a sketch of that kind of attempt is shown below.
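An illustration of the kind of attempt described above (not the actual `lib/session.ts` implementation); it can only ever affect cookies visible to the dashboard origin, and HttpOnly cookies or cookies scoped to the Keycloak domain stay out of reach:
```typescript
// Sketch: try several domain/path combinations when expiring Keycloak cookie names.
function tryClearKeycloakCookies(keycloakDomain?: string) {
  const names = ['KEYCLOAK_SESSION', 'KEYCLOAK_IDENTITY', 'AUTH_SESSION_ID'];
  const domains: (string | undefined)[] = [undefined, window.location.hostname, keycloakDomain];
  const paths = ['/', '/realms'];
  for (const name of names) {
    for (const domain of domains) {
      for (const path of paths) {
        const domainPart = domain ? `; domain=${domain}` : '';
        document.cookie = `${name}=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=${path}${domainPart}`;
      }
    }
  }
}
```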
#### `invalidateServiceTokens()` (lines 53-91)
**Cookies**: none, invalidates the tokens of external services (RocketChat, Leantime)
---
### 11. **`lib/keycloak.ts`**
**Role**: Keycloak Admin client for server-side management
**Cookies**: no direct manipulation
- Used by `/api/auth/end-sso-session` to terminate the SSO session
---
## 🌐 API FILES - Server Endpoints
### 12. **`app/api/auth/end-sso-session/route.ts`** ⭐ **CRITICAL FILE**
**Role**: terminates the Keycloak SSO session via the Admin API
**Cookies**: no direct manipulation
- Uses the Keycloak Admin API to log the user out
- **Important**: terminates the session **realm-wide**, not just for this client
**Flow** (a sketch follows below):
1. Reads `next-auth.session-token` via `getServerSession()`
2. Extracts the `idToken` from the session
3. Decodes the `idToken` to obtain the `userId`
4. Calls `adminClient.users.logout({ id: userId })`
5. Keycloak removes **all** of the user's sessions
**Cookie impact**:
- Keycloak removes its cookies server-side
- The Keycloak cookies become invalid (but remain in the browser until they expire)
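A sketch of that flow with `@keycloak/keycloak-admin-client` (an illustration, not the repository's actual route); the client-credentials grant, the env variable names `KEYCLOAK_BASE_URL` / `KEYCLOAK_REALM`, the `../options` import path, and reading the user id from the `sub` claim are assumptions:
```typescript
import { NextResponse } from 'next/server';
import { getServerSession } from 'next-auth/next';
import KcAdminClient from '@keycloak/keycloak-admin-client';
import { authOptions } from '../options'; // assumed relative path

export async function POST() {
  const session = await getServerSession(authOptions);
  if (!session?.idToken) {
    return NextResponse.json({ error: 'No session' }, { status: 401 });
  }

  // Decode the ID token payload to get the Keycloak user id (sub claim)
  const payload = JSON.parse(
    Buffer.from(session.idToken.split('.')[1], 'base64').toString('utf8')
  );
  const userId = payload.sub as string;

  const adminClient = new KcAdminClient({
    baseUrl: process.env.KEYCLOAK_BASE_URL,
    realmName: process.env.KEYCLOAK_REALM,
  });
  await adminClient.auth({
    grantType: 'client_credentials',
    clientId: process.env.KEYCLOAK_CLIENT_ID!,
    clientSecret: process.env.KEYCLOAK_CLIENT_SECRET!,
  });

  // Realm-wide logout: removes all of the user's sessions, not just this client's
  await adminClient.users.logout({ id: userId });

  return NextResponse.json({ success: true });
}
```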
---
### 13. **`app/api/auth/refresh-keycloak-session/route.ts`**
**Role**: refreshes the Keycloak session (if one exists)
**Cookies**: reads `next-auth.session-token` via `getServerSession()`
---
## 📄 LAYOUT FILES - Application Structure
### 14. **`app/layout.tsx`**
**Role**: root layout with server-side session check
**Cookies**: reads `next-auth.session-token` via `getServerSession(authOptions)`
- Passes `isAuthenticated` to `LayoutWrapper`
- Determines whether the current page is the signin page
---
### 15. **`app/components/responsive-iframe.tsx`** (lines 109-153)
**Role**: iframe component that listens for logout messages
**Cookies**: no direct manipulation
- Listens for `postMessage` events from iframes
- Triggers a logout when a `KEYCLOAK_LOGOUT` message is received
- **Note**: similar logic to `layout-wrapper.tsx`, but inside the iframe component
---
## 📝 TYPE FILES - TypeScript Definitions
### 16. **`types/next-auth.d.ts`**
**Role**: TypeScript extensions for NextAuth
**Cookies**: no manipulation, defines the session/JWT types
---
## 🔍 DETAILED COOKIE ANALYSIS
### NextAuth Cookies
#### 1. **`next-auth.session-token`** (or secure variants)
- **Domain**: dashboard domain
- **Path**: `/`
- **HttpOnly**: `true` (security)
- **Secure**: `true` (if HTTPS)
- **SameSite**: `Lax` (default)
- **Content**: encrypted JWT containing:
- `accessToken` (Keycloak)
- `refreshToken` (Keycloak)
- `idToken` (Keycloak)
- User data (id, email, roles, etc.)
- **Lifetime**: 4 hours (configured in `options.ts`)
- **Created**: on a successful `signIn()`
- **Removed**: on `signOut()` or expiration
- **Variants**:
- `__Secure-next-auth.session-token` (if HTTPS)
- `__Host-next-auth.session-token` (if root domain)
#### 2. **`next-auth.csrf-token`**
- **Domain**: dashboard domain
- **Path**: `/`
- **HttpOnly**: `true`
- **Secure**: `true` (if HTTPS)
- **SameSite**: `Lax`
- **Content**: CSRF token protecting the OAuth flow
- **Lifetime**: session (removed when the browser closes)
- **Created**: on the first OAuth request
- **Removed**: when the browser closes
- **Important**: is **NOT** removed by `clearAuthCookies()` (needed for OAuth)
#### 3. **`next-auth.state`**
- **Domain**: dashboard domain
- **Path**: `/`
- **HttpOnly**: `true`
- **Secure**: `true` (if HTTPS)
- **SameSite**: `Lax`
- **Content**: OAuth state used to validate the callback
- **Lifetime**: short (during the OAuth flow)
- **Created**: on `signIn()` (start of the OAuth flow)
- **Removed**: after the OAuth callback is validated
- **Important**: is **NOT** removed by `clearAuthCookies()` (needed for OAuth)
#### 4. **`next-auth.callback-url`**
- **Domain**: dashboard domain
- **Path**: `/`
- **HttpOnly**: `true`
- **Secure**: `true` (if HTTPS)
- **SameSite**: `Lax`
- **Content**: redirect URL after authentication
- **Lifetime**: short (during the OAuth flow)
- **Created**: on `signIn()` with a `callbackUrl`
- **Removed**: after the redirect
### Keycloak Cookies
#### 1. **`KEYCLOAK_SESSION`**
- **Domain**: Keycloak domain (may differ from the dashboard's)
- **Path**: `/` or `/realms/{realm}`
- **HttpOnly**: `true`
- **Secure**: `true` (if HTTPS)
- **SameSite**: `Lax` or `None` (for cross-site)
- **Content**: Keycloak SSO session identifier
- **Lifetime**: configured in Keycloak (typically 30 min to a few hours)
- **Created**: during Keycloak authentication
- **Removed**: on Keycloak logout or expiration
- **Problem**: **cannot be removed** from the dashboard if the domain differs
#### 2. **`KEYCLOAK_IDENTITY`**
- **Domain**: Keycloak domain
- **Path**: `/` or `/realms/{realm}`
- **HttpOnly**: `true`
- **Secure**: `true`
- **SameSite**: `Lax` or `None`
- **Content**: Keycloak user identity
- **Lifetime**: same as `KEYCLOAK_SESSION`
- **Created**: during Keycloak authentication
- **Removed**: on Keycloak logout or expiration
#### 3. **`AUTH_SESSION_ID`**
- **Domain**: Keycloak domain
- **Path**: `/` or `/realms/{realm}`
- **HttpOnly**: `true`
- **Secure**: `true`
- **SameSite**: `Lax` or `None`
- **Content**: authentication session ID
- **Lifetime**: short (during the authentication flow)
- **Created**: at the start of the authentication flow
- **Removed**: after a successful or failed authentication
### Custom Cookies
#### 1. **`logout_in_progress`**
- **Domain**: dashboard domain
- **Path**: `/`
- **HttpOnly**: `false` (accessible from JavaScript)
- **Secure**: `false`
- **SameSite**: not set
- **Content**: `"true"`
- **Lifetime**: 60 seconds (`max-age=60`)
- **Created**: on `signOut()` (in `signout-handler.tsx`, `main-nav.tsx`, `layout-wrapper.tsx`)
- **Removed**: expires after 60s or is removed manually
- **Usage**: prevents auto-login right after logout
---
## 🔄 COMPLETE LOGIN FLOW
### Step 1: User visits `/signin`
**File**: `app/signin/page.tsx`
**Cookies**:
- Checks whether `next-auth.session-token` exists
- If it exists but `status === "unauthenticated"` → the session was invalidated
- If it does not exist → new user, triggers auto-login
### Step 2: Auto-login triggered
**File**: `app/signin/page.tsx` (line 118)
**Action**: `signIn("keycloak", { callbackUrl: "/" })`
**Cookies created**:
- `next-auth.csrf-token` (by NextAuth)
- `next-auth.state` (by NextAuth)
- `next-auth.callback-url` (by NextAuth)
### Step 3: Redirect to Keycloak
**File**: `app/api/auth/[...nextauth]/route.ts` → NextAuth internals
**URL**: `${KEYCLOAK_ISSUER}/protocol/openid-connect/auth?...&prompt=login`
**Keycloak cookies created**:
- `AUTH_SESSION_ID` (by Keycloak)
### Step 4: Keycloak authentication
**File**: Keycloak server
**Keycloak cookies created**:
- `KEYCLOAK_SESSION` (SSO session)
- `KEYCLOAK_IDENTITY` (user identity)
### Step 5: OAuth callback
**File**: `app/api/auth/callback/keycloak` (handled by NextAuth)
**Cookies**:
- `next-auth.state` verified then removed
- `next-auth.callback-url` read and used
### Step 6: JWT callback
**File**: `app/api/auth/options.ts` → `jwt` callback (line 196)
**Cookies**:
- Reads `next-auth.session-token` (decrypted)
- Stores the Keycloak tokens in the JWT
- **Creates** `next-auth.session-token` (new JWT containing the tokens)
### Step 7: Session callback
**File**: `app/api/auth/options.ts` → `session` callback (line 283)
**Cookies**: reads `next-auth.session-token` to build the session
### Step 8: Redirect to `/`
**File**: `app/signin/page.tsx` (line 72)
**Cookies**: `next-auth.session-token` is now present
### Step 9: Storage initialization
**File**: `app/signin/page.tsx` (lines 126-158)
**Action**: calls `/api/storage/init`
**Cookies**: uses `next-auth.session-token` (via `getServerSession()`)
---
## 🔄 COMPLETE LOGOUT FLOW
### Step 1: User clicks "Sign out"
**Files**:
- `components/main-nav.tsx` (line 364)
- OR `components/auth/signout-handler.tsx` (line 11)
- OR `components/layout/layout-wrapper.tsx` (line 32) if an iframe message is received
**Cookies created**:
- `logout_in_progress=true; path=/; max-age=60` (line 16/369/38)
- `sessionStorage.setItem('just_logged_out', 'true')` (line 14/367/37)
### Step 2: NextAuth cookies removed
**File**: `lib/session.ts` → `clearAuthCookies()` (line 93)
**Cookies removed**:
- `next-auth.session-token` (and variants)
- **NOT** `next-auth.csrf-token` (needed for OAuth)
- **NOT** `next-auth.state` (needed for OAuth)
### Step 3: Attempt to remove the Keycloak cookies
**File**: `lib/session.ts` → `clearKeycloakCookies()` (line 115)
**Cookies it tries to remove**:
- `KEYCLOAK_SESSION`, `KEYCLOAK_IDENTITY`, etc.
- **Limitation**: fails if Keycloak is on a different domain
### Step 4: SSO session terminated via the Admin API
**File**: `app/api/auth/end-sso-session/route.ts` (line 15)
**Action**: `adminClient.users.logout({ id: userId })`
**Cookies**:
- Keycloak removes **all** sessions server-side
- The Keycloak cookies become invalid (but remain in the browser)
### Step 5: NextAuth signOut
**File**: `components/auth/signout-handler.tsx` (line 52)
**Action**: `signOut({ callbackUrl: "/signin?logout=true", redirect: false })`
**Cookies removed**:
- `next-auth.session-token` (removed server-side)
### Step 6: Redirect to Keycloak logout
**File**: `components/auth/signout-handler.tsx` (line 58)
**URL**: `${KEYCLOAK_ISSUER}/protocol/openid-connect/logout?...&id_token_hint=...&kc_action=LOGOUT`
**Keycloak cookies**:
- Keycloak removes its cookies (if same domain, or if cross-domain is configured)
### Step 7: Redirect to `/signin?logout=true`
**File**: Keycloak → `app/signin/page.tsx`
**Cookies**:
- `next-auth.session-token`: removed
- `KEYCLOAK_SESSION`: may still exist (if the domain differs)
- `logout_in_progress`: still present (60s)
### Step 8: Logout detection in signin
**File**: `app/signin/page.tsx` (lines 17-67)
**Cookies checked**:
- `logout_in_progress` (line 19)
- `next-auth.session-token` (lines 25-29)
- `sessionStorage.getItem('just_logged_out')` (line 20)
**Behavior**:
- If `logout=true` is in the URL → shows the "Vous avez été déconnecté" message
- If a session cookie exists but is invalid → prevents auto-login
- If there is no session cookie → auto-login after 1s (new user)
---
## ⚠️ IDENTIFIED PROBLEMS
### Problem 1: Keycloak cookies cannot be removed
**File**: `lib/session.ts` → `clearKeycloakCookies()`
**Cause**: the Keycloak cookies live on a different domain
**Impact**: the Keycloak cookies persist after a dashboard logout
**Current workaround**: call the Keycloak logout endpoint with `id_token_hint`
### Problem 2: The Keycloak SSO session can persist
**File**: `app/api/auth/options.ts` (line 154)
**Cause**: `prompt=login` forces the prompt, but if an SSO session exists Keycloak can auto-authenticate
**Impact**: the user can be reconnected automatically without entering credentials
**Current workaround**: `prompt=login` + an Admin API call to terminate the SSO session
### Problem 3: Complex invalid-session detection
**File**: `app/signin/page.tsx` (lines 17-67)
**Cause**: complex logic to tell an invalidated session apart from a new user
**Impact**: auto-login can be triggered incorrectly
**Current workaround**: multiple checks (cookies, sessionStorage, URL params)
### Problem 4: Logout/login race condition
**File**: `app/signin/page.tsx` (lines 69-124)
**Cause**: the auto-login with a 1s delay can fire while a logout is still in progress
**Impact**: the user can be reconnected immediately after logging out
**Current workaround**: the `logout_in_progress` and `session_invalidated` flags
---
## 📊 SUMMARY OF FILES BY CATEGORY
### Core Configuration (2 files)
1. `app/api/auth/[...nextauth]/route.ts`
2. `app/api/auth/options.ts`
### Pages (2 files)
3. `app/signin/page.tsx`
4. `app/signout/page.tsx`
### Auth Components (4 files)
5. `components/auth/signout-handler.tsx`
6. `components/auth/auth-check.tsx`
7. `components/auth/signin-form.tsx` (if it exists)
8. `components/auth/login-card.tsx` (if it exists)
### Layout Components (2 files)
9. `components/layout/layout-wrapper.tsx`
10. `components/providers.tsx`
### Navigation (1 file)
11. `components/main-nav.tsx`
### Utilities (2 files)
12. `lib/session.ts`
13. `lib/keycloak.ts`
### API Routes (2 files)
14. `app/api/auth/end-sso-session/route.ts`
15. `app/api/auth/refresh-keycloak-session/route.ts`
### Root Layout (1 file)
16. `app/layout.tsx`
### Iframe (1 file)
17. `app/components/responsive-iframe.tsx`
### Types (1 file)
18. `types/next-auth.d.ts`
**Total: 18 main files**
---
## 🎯 CRITICAL FILES (⭐)
The files marked ⭐ are **critical** for the login/logout flow:
1. **`app/api/auth/options.ts`** - NextAuth configuration, JWT/session callbacks
2. **`app/signin/page.tsx`** - complex logout-detection/auto-login logic
3. **`components/auth/signout-handler.tsx`** - complete logout flow
4. **`lib/session.ts`** - cookie management (removal)
5. **`components/layout/layout-wrapper.tsx`** - listens for logouts coming from iframes
6. **`components/main-nav.tsx`** - logout button
7. **`app/api/auth/end-sso-session/route.ts`** - terminates the Keycloak SSO session
---
## 📝 IMPORTANT NOTES
1. **NextAuth cookies**: managed automatically by NextAuth; no manual handling needed except for removal
2. **Keycloak cookies**: cannot be removed from the dashboard if they live on a different domain (browser limitation)
3. **SSO session**: must be terminated through the Keycloak Admin API to be fully removed
4. **Auto-login**: complex logic to tell a new user apart from an invalidated session
5. **Iframe logout**: communication via `postMessage` to synchronize the logout
---
**Document created on**: $(date)
**Last updated**: complete analysis of the login/logout workflow with a focus on cookies

View File

@ -1,305 +0,0 @@
# Full Logout/Login Flow Trace
## Issue 1: No Credentials Asked After Logout/Login
### Current Flow Trace
#### Step 1: User Clicks Logout
```
Location: components/main-nav.tsx (line 364)
Action: onClick handler triggered
1. sessionStorage.setItem('just_logged_out', 'true')
2. document.cookie = 'logout_in_progress=true; path=/; max-age=60'
3. clearAuthCookies() - Clears NextAuth cookies client-side
4. signOut({ callbackUrl: '/signin?logout=true', redirect: false })
→ Calls NextAuth /api/auth/signout endpoint
→ Clears NextAuth session cookie server-side
5. window.location.replace(keycloakLogoutUrl)
→ Redirects to: ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
→ Parameters:
- post_logout_redirect_uri: /signin?logout=true
- id_token_hint: <ID token>
```
#### Step 2: Keycloak Logout Endpoint
```
Location: Keycloak Server
URL: ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
Expected Behavior:
- Keycloak should invalidate the session
- Keycloak should clear session cookies:
- KEYCLOAK_SESSION (main session cookie)
- KEYCLOAK_SESSION_LEGACY
- KEYCLOAK_IDENTITY (identity cookie)
- KEYCLOAK_IDENTITY_LEGACY
- AUTH_SESSION_ID
- KC_RESTART (if exists)
ACTUAL BEHAVIOR (PROBLEM):
- Keycloak logout endpoint with id_token_hint SHOULD clear cookies
- BUT: Keycloak might have SSO session that persists across clients
- OR: Cookies might not be cleared if domain/path mismatch
- OR: Keycloak might set new cookies during redirect
```
#### Step 3: Redirect Back to Signin
```
Location: app/signin/page.tsx
URL: /signin?logout=true
1. Component mounts
2. useEffect checks for logout flag (line 16-45)
- Sets isLogoutRedirect.current = true
- Removes 'just_logged_out' from sessionStorage
- Clears OAuth params from URL
3. Shows logout message with "Se connecter" button
4. User clicks "Se connecter" button (line 143-148)
- Calls: signIn("keycloak", { callbackUrl: "/" })
```
#### Step 4: Keycloak Authorization Request
```
Location: NextAuth → Keycloak
URL: ${KEYCLOAK_ISSUER}/protocol/openid-connect/auth
Parameters sent:
- client_id: KEYCLOAK_CLIENT_ID
- redirect_uri: ${NEXTAUTH_URL}/api/auth/callback/keycloak
- response_type: code
- scope: openid profile email roles
- state: <random state>
- code_challenge: (if PKCE enabled)
KEYCLOAK BEHAVIOR:
1. Keycloak receives authorization request
2. Keycloak checks for existing session cookies
3. IF Keycloak session cookies still exist:
→ Keycloak finds valid SSO session
→ Keycloak auto-authenticates user (no login prompt)
→ Keycloak redirects back with authorization code
4. IF Keycloak session cookies are cleared:
→ Keycloak shows login page
→ User enters credentials
→ Keycloak creates new session
→ Keycloak redirects back with authorization code
PROBLEM IDENTIFIED:
- Keycloak logout endpoint might not be clearing ALL session cookies
- OR: Keycloak has SSO session that persists (separate from client session)
- OR: Keycloak sets new cookies during the logout redirect process
- OR: Browser is preserving cookies due to SameSite/domain issues
```
### Root Cause Analysis
**Problem**: Keycloak SSO Session Persistence
Keycloak maintains two types of sessions:
1. **Client Session** (per OAuth client) - Cleared by logout endpoint
2. **SSO Session** (realm-wide) - May persist even after client logout
When you call:
```
GET /protocol/openid-connect/logout?id_token_hint=...&post_logout_redirect_uri=...
```
Keycloak behavior:
- ✅ Clears the **client session** for that specific OAuth client
- ✅ Invalidates tokens for that client
- ❌ **MIGHT NOT** clear the **SSO session** (realm-wide session)
- ❌ **MIGHT NOT** clear all session cookies if cookies are set with different domain/path
**Why SSO Session Persists:**
- Keycloak SSO session is realm-wide, not client-specific
- Multiple clients can share the same SSO session
- Logging out from one client doesn't necessarily log out from the realm
- The SSO session cookie (KEYCLOAK_SESSION) might persist
**When User Clicks "Se connecter":**
1. Redirects to Keycloak authorization endpoint
2. Keycloak checks for SSO session cookie
3. If SSO session cookie exists → Auto-authenticates (no credentials asked)
4. If SSO session cookie cleared → Shows login page
---
## Issue 2: Cannot Logout from Iframe Application
### Current Flow Trace
#### Step 1: User in Iframe Application
```
Location: Iframe application (e.g., /parole, /gite, etc.)
State:
- Dashboard has NextAuth session
- Keycloak session cookies exist
- Iframe app authenticated via Keycloak cookies
```
#### Step 2: User Clicks Logout in Iframe
```
Location: Iframe application's logout button
Action: Iframe app's logout handler
Possible Scenarios:
Scenario A: Iframe calls its own logout endpoint
- Iframe app might call: POST /api/logout (iframe app's endpoint)
- This might clear iframe app's session
- BUT: Keycloak session cookies might still exist
- Result: Iframe app logs out, but Keycloak session persists
Scenario B: Iframe calls Keycloak logout
- Iframe app might call: GET ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
- Keycloak clears session cookies
- BUT: NextAuth dashboard session still exists
- Result: Keycloak session cleared, but dashboard still logged in
Scenario C: Iframe doesn't have logout
- Iframe app might not have logout functionality
- User stuck in iframe with no way to logout
```
#### Step 3: What Happens After Iframe Logout
```
If iframe calls Keycloak logout:
1. Keycloak invalidates session
2. Keycloak clears session cookies
3. NextAuth dashboard still has valid JWT (30-day expiration)
4. NextAuth doesn't know Keycloak session was cleared
5. Dashboard widgets still show (using NextAuth session)
6. Iframe apps can't authenticate (Keycloak cookies cleared)
```
### Root Cause Analysis
**Problem**: No Communication Between Iframe and Dashboard
When iframe app logs out:
1. **Iframe app** calls Keycloak logout → Clears Keycloak cookies
2. **Dashboard** doesn't know about this → NextAuth session still valid
3. **Dashboard widgets** continue to work (using NextAuth session)
4. **Iframe apps** can't authenticate (Keycloak cookies gone)
**Why Dashboard Doesn't Know:**
- NextAuth session is independent of Keycloak session cookies
- NextAuth JWT has 30-day expiration
- No mechanism to detect Keycloak session invalidation from iframe
- Dashboard only detects invalidation when trying to refresh tokens
**Why Iframe Can't Logout Properly:**
- Iframe apps rely on Keycloak cookies for SSO
- If iframe calls Keycloak logout, it clears cookies
- But dashboard session persists
- If iframe doesn't call logout, user can't logout from iframe
- No way for iframe to trigger dashboard logout
---
## Key Findings
### Finding 1: Keycloak SSO Session Persistence
- **Issue**: Keycloak logout endpoint might not clear SSO session
- **Evidence**: User auto-authenticates without credentials after logout
- **Root Cause**: SSO session cookie persists after client logout
- **Impact**: Security issue - user should be asked for credentials
### Finding 2: Missing `prompt=login` Parameter
- **Issue**: When calling `signIn("keycloak")`, no `prompt=login` parameter is sent
- **Evidence**: Keycloak auto-authenticates if SSO session exists
- **Root Cause**: NextAuth Keycloak provider doesn't force login prompt
- **Impact**: User bypasses credential check
### Finding 3: Iframe Logout Isolation
- **Issue**: Iframe logout doesn't affect dashboard session
- **Evidence**: Dashboard widgets still show after iframe logout
- **Root Cause**: No communication mechanism between iframe and dashboard
- **Impact**: Inconsistent logout state
### Finding 4: No Cross-Origin Logout Communication
- **Issue**: Iframe can't trigger dashboard logout
- **Evidence**: User stuck in iframe after logout
- **Root Cause**: No postMessage or other communication mechanism
- **Impact**: Poor user experience
---
## Flow Diagram
### Current Logout Flow (Dashboard)
```
User clicks logout
Clear NextAuth cookies
Call NextAuth signOut()
Redirect to Keycloak logout
Keycloak clears client session
Keycloak MAY clear SSO session (not guaranteed)
Redirect to /signin?logout=true
User clicks "Se connecter"
Redirect to Keycloak auth
Keycloak checks SSO session
IF SSO session exists → Auto-authenticate (NO CREDENTIALS)
IF SSO session cleared → Show login page
```
### Current Iframe Logout Flow
```
User in iframe clicks logout
Iframe app calls logout (varies by app)
IF calls Keycloak logout:
→ Keycloak clears session cookies
→ Dashboard session still valid
→ Widgets still show
→ Iframe can't authenticate
IF doesn't call logout:
→ User stuck in iframe
→ No way to logout
```
---
## Recommendations
### For Issue 1 (No Credentials After Logout)
1. **Add `prompt=login` parameter** to Keycloak authorization request
- Forces Keycloak to show login page even if SSO session exists
   - Location: `app/api/auth/options.ts` - KeycloakProvider authorization params (a per-call alternative is sketched after this list)
2. **Clear Keycloak SSO session explicitly**
- Add `kc_action=LOGOUT` parameter to logout URL
- Or call Keycloak admin API to end SSO session
3. **Clear Keycloak cookies client-side**
- After Keycloak logout redirect, clear any remaining Keycloak cookies
- Check for cookies with Keycloak domain
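As a per-call alternative to changing the provider config, NextAuth's `signIn()` accepts a third argument of extra authorization parameters, so `prompt=login` can be requested only from the post-logout sign-in button. A minimal sketch, not the final implementation:
```typescript
import { signIn } from 'next-auth/react';

// Third argument = extra parameters appended to the Keycloak authorization request
signIn('keycloak', { callbackUrl: '/' }, { prompt: 'login' });
```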
### For Issue 2 (Iframe Logout)
1. **Implement postMessage communication** (see the sketch after this list)
- Iframe sends logout message to parent
- Dashboard listens for logout messages
- Dashboard triggers logout when iframe logs out
2. **Detect Keycloak session invalidation**
- Poll Keycloak session status
- Detect when Keycloak cookies are cleared
- Automatically logout dashboard
3. **Unified logout endpoint**
- Create API endpoint that logs out both dashboard and Keycloak
- Iframe apps call this endpoint
- Ensures synchronized logout
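A minimal sketch of the postMessage approach from recommendation 1; the message shape (`type: 'APP_LOGOUT'`) and the allowed-origin list are assumptions, not an existing contract between the apps:
```typescript
// Dashboard (parent window): listen for a logout message from embedded apps.
// The iframe side would call: window.parent.postMessage({ type: 'APP_LOGOUT' }, DASHBOARD_ORIGIN)
import { signOut } from 'next-auth/react';

const ALLOWED_IFRAME_ORIGINS = ['https://parole.slm-lab.net']; // assumed: one entry per embedded app

window.addEventListener('message', (event: MessageEvent) => {
  if (!ALLOWED_IFRAME_ORIGINS.includes(event.origin)) return; // drop messages from untrusted origins
  if (event.data?.type === 'APP_LOGOUT') {
    // Reuse the dashboard's existing logout flow (NextAuth session + Keycloak redirect)
    signOut({ callbackUrl: '/signin?logout=true' });
  }
});
```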

View File

@ -1,239 +0,0 @@
# Log Analysis & Feedback Report
**Date**: 2026-01-01
**Log File**: `log`
**Analysis Scope**: Application startup, notifications, session management, API calls
---
## 🔴 Critical Issues
### 1. Excessive Session Callback Logging (HIGH PRIORITY)
**Problem**:
- **10+ session callbacks** triggered in a short period
- Each `getServerSession()` call triggers verbose logging
- Logs show `=== SESSION CALLBACK START ===` and `=== SESSION CALLBACK END ===` repeatedly
**Root Cause**:
- Every API route calls `getServerSession(authOptions)`
- Root layout (`app/layout.tsx`) also calls it
- Session callback has extensive logging (lines 407-415 in `app/api/auth/options.ts`)
**Impact**:
- ⚠️ **Performance**: Unnecessary logging overhead on every request
- ⚠️ **Log Noise**: Makes it hard to find actual issues
- ⚠️ **Debugging**: Difficult to identify real problems
**Recommendation**:
```typescript
// In app/api/auth/options.ts, around lines 405-415
async session({ session, token }) {
  // Only log in development, when DEBUG_SESSION is set, or when the token carries an error
  const DEBUG_SESSION = process.env.DEBUG_SESSION === 'true';
  if (process.env.NODE_ENV === 'development' || DEBUG_SESSION || token.error) {
    console.log('=== SESSION CALLBACK START ===');
    console.log('Token error:', token.error);
    // ... rest of logging
  }
  // ... rest of callback
}
```
**Priority**: 🔴 **HIGH** - Should be fixed immediately
---
### 2. Missing markAsRead/markAllAsRead Logs
**Problem**:
- No API calls to `/api/notifications/[id]/read` or `/api/notifications/read-all` in the log
- User reported notification count not updating after marking as read
**Possible Causes**:
1. User didn't actually mark notifications as read during this log session
2. API calls are failing silently (network errors, CORS, etc.)
3. Client-side code isn't calling the API correctly
4. API routes aren't logging their calls
**Investigation Steps**:
1. Add logging to mark-as-read API routes:
```typescript
// In app/api/notifications/[id]/read/route.ts
export async function POST(request: Request, context: { params: Promise<{ id: string }> }) {
  const { id } = await context.params; // params is a Promise here and must be awaited
  console.log('[NOTIFICATION_API] Mark as read called', { id });
  // ... rest of code
}
```
2. Check browser console for client-side errors
3. Verify network tab shows the API calls being made
4. Test the mark-as-read functionality while monitoring logs
**Priority**: 🟡 **MEDIUM** - Needs investigation
---
## ✅ Positive Observations
### 1. Notification Service Working Correctly
- ✅ Service initialized properly
- ✅ Count fetched: **100 total, 66 unread**
- ✅ List fetched: **20 notifications**
- ✅ Caching working: `Cached notification counts for user`
- ✅ Leantime adapter functioning correctly
### 2. Infrastructure Healthy
- ✅ Redis connection successful
- ✅ Database queries working (Prisma)
- ✅ IMAP connection successful (633ms)
- ✅ External APIs responding (News, Leantime, Rocket.Chat)
### 3. Data Flow
- ✅ Session creation successful
- ✅ User authentication working
- ✅ Token refresh logic functioning
- ✅ OAuth token management working
---
## 📊 Performance Metrics from Log
| Metric | Value | Status |
|--------|-------|--------|
| Redis Connection | ✅ Success | Good |
| IMAP Connection Time | 633ms | Acceptable |
| Notification Count Fetch | ✅ Success | Good |
| Notification List Fetch | ✅ Success | Good |
| Session Callbacks | 10+ in short period | ⚠️ Too many |
| Database Queries | ✅ Working | Good |
---
## 🔧 Recommended Actions
### Immediate (This Week)
1. **Reduce Session Callback Logging**
- Add environment-based conditional logging
- Only log errors or use `DEBUG_SESSION` flag
- **File**: `app/api/auth/options.ts`
2. **Add Logging to Mark-as-Read Endpoints**
- Add console.log to track when mark-as-read is called
- Log success/failure
- **Files**:
- `app/api/notifications/[id]/read/route.ts`
- `app/api/notifications/read-all/route.ts`
3. **Test Notification Mark-as-Read Flow**
- Monitor logs while marking notifications as read
- Verify API calls are being made
- Check if cache invalidation is working
### Short Term (Next Sprint)
4. **Implement Request Deduplication** (see the sketch after this list)
- Use the `request-deduplication` utility
- Prevent duplicate API calls
- **Already planned in unified refresh system**
5. **Add Performance Monitoring**
- Track API call frequency
- Monitor session callback frequency
- Alert on excessive calls
6. **Optimize Session Access**
- Consider caching session data
- Reduce redundant `getServerSession()` calls
- Use session context where possible
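A minimal in-memory sketch of the request deduplication mentioned in item 4; the project's `request-deduplication` utility may look different, so the names here are illustrative:
```typescript
// Share one in-flight promise per key so concurrent callers reuse the same request
const inflight = new Map<string, Promise<unknown>>();

export function dedupe<T>(key: string, fn: () => Promise<T>): Promise<T> {
  const existing = inflight.get(key);
  if (existing) return existing as Promise<T>;

  const promise = fn().finally(() => inflight.delete(key)); // clear the slot once settled
  inflight.set(key, promise);
  return promise;
}

// Usage (fetchNotificationCount is illustrative):
// dedupe(`notifications:count:${userId}`, () => fetchNotificationCount(userId));
```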
---
## 🐛 Potential Issues Not Visible in Log
### 1. Client-Side Errors
- Browser console errors not captured in server logs
- Network request failures
- React component errors
### 2. Cache Invalidation
- No logs showing cache invalidation after mark-as-read
- May need to verify `invalidateCache()` is being called
### 3. Race Conditions
- Multiple simultaneous API calls
- State update conflicts
- Not visible in single-threaded log
---
## 📝 Log Patterns Analysis
### Session Callback Pattern
```
=== SESSION CALLBACK START ===
Token error: undefined
Has accessToken: true
Has refreshToken: true
Token role: [...]
Token sub: 203cbc91-61ab-47a2-95d2-b5e1159327d7
Token email: a.tmiri@clm.foundation
...
✅ Session created successfully
=== SESSION CALLBACK END ===
```
**Frequency**: Every API call that uses `getServerSession()`
**Recommendation**: Reduce to error-only logging
---
### Notification Service Pattern
```
[NOTIFICATION_SERVICE] getNotificationCount called for user ...
[LEANTIME_ADAPTER] getNotificationCount called for userId: ...
[LEANTIME_ADAPTER] Notification counts: { total: 100, unread: 66 }
[NOTIFICATION_SERVICE] Cached notification counts for user ...
```
**Status**: ✅ Working correctly
---
## 🎯 Next Steps
1. **Immediate**: Fix session callback logging (5 minutes)
2. **Today**: Add logging to mark-as-read endpoints (10 minutes)
3. **This Week**: Test notification mark-as-read flow end-to-end
4. **Next Sprint**: Implement unified refresh system (already planned)
---
## 📌 Summary
**Overall Assessment**: ✅ **System is functioning correctly**
**Main Concerns**:
1. Excessive logging causing performance overhead
2. Missing visibility into mark-as-read operations
3. Need to verify notification count update flow
**Confidence Level**: 🟢 **HIGH** - Core functionality working, minor optimizations needed
---
**Generated**: 2026-01-01
**Analyst**: AI Code Assistant
**Next Review**: After implementing fixes

View File

@ -1,342 +0,0 @@
# Log Flow Analysis - Application Startup & Runtime
**Date**: 2026-01-01
**Log Source**: Application startup and initial page load
**Analysis Focus**: Flow patterns, errors, and system behavior
---
## 🔍 Executive Summary
**Overall Status**: 🟡 **MOSTLY HEALTHY** with one non-critical error
**Key Findings**:
1. ⚠️ **Syntax Error**: Non-critical error during startup (doesn't block execution)
2. ✅ **Session Management**: Working correctly (5 session callbacks during startup)
3. ✅ **Notification Service**: Initialized and functioning (100 total, 66 unread)
4. ✅ **External Services**: All connecting successfully
5. ⚠️ **No Mark-as-Read Activity**: No API calls to mark notifications as read
---
## 📊 Flow Breakdown
### Phase 1: Application Startup (Lines 1-33)
```
1. Next.js starts (1313ms)
2. Redis connection established ✅
3. Microsoft OAuth configuration loaded ✅
4. ⚠️ SyntaxError: Unexpected identifier 'http' (line 29)
5. Redis connection warmed up ✅
```
**Observations**:
- ✅ Startup is fast (1.3 seconds)
- ✅ Redis connection successful
- ⚠️ **Syntax Error** appears but doesn't block execution
- Error occurs between Redis warmup calls
**Syntax Error Details**:
```
SyntaxError: Unexpected identifier 'http'
at Object.Function [as get] (<anonymous>) {
digest: '2421336728'
}
```
**Analysis**:
- Error is in a route handler (Function.get)
- Likely related to a route file with syntax issue
- Doesn't crash the application
- May be related to dynamic route generation
**Recommendation**: Investigate route files for syntax errors, especially those using `http` in identifiers.
---
### Phase 2: Initial Session Creation (Lines 34-71)
```
1. Session callback triggered
2. Token validation ✅
3. User roles extracted ✅
4. Session created successfully ✅
```
**Session Details**:
- User ID: `203cbc91-61ab-47a2-95d2-b5e1159327d7`
- Email: `a.tmiri@clm.foundation`
- Roles: `['expression', 'entrepreneurship', 'admin', 'dataintelligence', 'mediation', 'mentors']`
- Tokens: Access token ✅, Refresh token ✅
**Status**: ✅ **HEALTHY**
---
### Phase 3: Rocket.Chat Integration (Lines 72-91)
```
1. Rocket.Chat base URL: https://parole.slm-lab.net ✅
2. Users list fetched (13 users) ✅
3. User found: aminetmiri ✅
4. Subscriptions filtered (1 room) ✅
5. Messages fetched (5 messages) ✅
6. Messages cached ✅
7. ⚠️ "No valid session or email found" (line 92)
```
**Observations**:
- ✅ Rocket.Chat integration working
- ✅ User authentication successful
- ✅ Messages retrieved and cached
- ⚠️ Warning message at line 92 (may be from another service)
**Status**: ✅ **HEALTHY** (warning is likely from a different service)
---
### Phase 4: Additional Session Callbacks (Lines 93-169)
```
1. Session callback #2 (lines 93-130)
2. Session callback #3 (lines 132-169)
```
**Pattern**: Multiple session callbacks during initial page load
**Frequency**: 3 session callbacks in ~40 lines of log
**Analysis**:
- Normal behavior for Next.js with multiple API routes
- Each `getServerSession()` call triggers session callback
- All callbacks successful ✅
**Status**: ✅ **NORMAL** (but verbose logging as discussed)
---
### Phase 5: Notification Service Initialization (Lines 170-246)
```
1. Notification service instance created ✅
2. Leantime adapter initialized ✅
3. Adapter registered ✅
4. getNotificationCount called ✅
5. Leantime API called ✅
6. Response received (200) ✅
7. Notifications parsed ✅
8. Count calculated: 100 total, 66 unread ✅
9. Counts cached ✅
```
**Notification Details**:
- **Total**: 100 notifications
- **Unread**: 66 notifications
- **Source**: Leantime
- **Status**: ✅ **WORKING CORRECTLY**
**Flow**:
```
[NOTIFICATION_SERVICE] → [LEANTIME_ADAPTER] → Leantime API → Parse → Cache
```
**Status**: ✅ **HEALTHY**
---
### Phase 6: Additional Operations (Lines 247-289)
```
1. IMAP pool status logged
2. Session callback #4 (lines 248-285)
3. Cached messages used
4. IMAP pool status logged again
```
**Observations**:
- ✅ IMAP connection pool healthy (0 active, max 20)
- ✅ Session callbacks continuing (normal)
- ✅ Caching working (messages from cache)
**Status**: ✅ **HEALTHY**
---
## 🔴 Issues Identified
### 1. Syntax Error (Line 29) ⚠️
**Error**:
```
SyntaxError: Unexpected identifier 'http'
at Object.Function [as get] (<anonymous>)
```
**Impact**:
- ⚠️ **Low**: Doesn't crash application
- ⚠️ **Unknown**: May affect specific route
- ⚠️ **Non-blocking**: Application continues normally
**Possible Causes**:
1. Route file with syntax error
2. Dynamic route generation issue
3. Template literal or string interpolation problem
4. Environment variable parsing issue
**Investigation Steps**:
1. Search codebase for routes using `http` as identifier
2. Check dynamic route files
3. Review route handlers for syntax errors
4. Check Next.js route generation
**Priority**: 🟡 **MEDIUM** - Should be fixed but not blocking
---
### 2. "No valid session or email found" (Line 92) ⚠️
**Message**: `No valid session or email found`
**Context**: Appears after Rocket.Chat operations
**Analysis**:
- May be from a different service/route
- Doesn't affect Rocket.Chat functionality
- Could be from email service or another API route
**Investigation**: Check which service logs this message
**Priority**: 🟡 **LOW** - Appears to be a warning, not an error
---
### 3. No Mark-as-Read Activity ⚠️
**Observation**: No `[NOTIFICATION_API]` log entries
**Expected**: Should see logs when user marks notifications as read
**Possible Reasons**:
1. User hasn't tested mark-as-read yet
2. API calls not reaching server
3. Client-side errors preventing API calls
**Status**: ⏳ **PENDING TESTING**
**Action**: Test mark-as-read functionality and check for new log entries
---
## ✅ Positive Observations
### 1. Fast Startup
- ✅ Application ready in 1.3 seconds
- ✅ All services initialized quickly
### 2. Session Management
- ✅ All session callbacks successful
- ✅ Token validation working
- ✅ User roles extracted correctly
### 3. Notification Service
- ✅ Service initialized correctly
- ✅ Leantime adapter working
- ✅ API calls successful
- ✅ Caching functioning
### 4. External Services
- ✅ Redis connected
- ✅ Rocket.Chat connected
- ✅ Leantime API responding
- ✅ IMAP pool healthy
---
## 📈 Performance Metrics
| Metric | Value | Status |
|--------|-------|--------|
| Startup Time | 1313ms | ✅ Good |
| Redis Connection | ✅ Success | ✅ Good |
| Session Callbacks | 5 during startup | ✅ Normal |
| Notification Count | 100 total, 66 unread | ✅ Working |
| Rocket.Chat | ✅ Connected | ✅ Good |
| IMAP Pool | 0/20 active | ✅ Healthy |
---
## 🔄 Flow Patterns
### Session Callback Pattern
```
Every getServerSession() call → Session callback → Token validation → Session created
```
**Frequency**: 5 times during startup (normal for multi-route page)
**Recommendation**: Conditional logging (as discussed in impact analysis)
---
### Notification Service Pattern
```
Service init → Adapter registration → API call → Parse → Cache
```
**Status**: ✅ Working correctly
---
## 🎯 Recommendations
### Immediate Actions
1. **Investigate Syntax Error** 🔴
- Search for route files with `http` identifier
- Check dynamic routes
- Fix syntax error
2. **Test Mark-as-Read** 🟡
- Mark a notification as read
- Check logs for `[NOTIFICATION_API]` entries
- Verify notification count updates
3. **Identify "No valid session" Source** 🟡
- Find which service logs this message
- Determine if it's an error or warning
- Fix if necessary
### Future Improvements
4. **Implement Conditional Session Logging** (as planned)
- Add `DEBUG_SESSION` flag
- Reduce production logging
- Keep error logging
5. **Add Error Monitoring**
- Track syntax errors
- Monitor route handler failures
- Alert on critical errors
---
## 📝 Summary
**Overall Assessment**: 🟢 **HEALTHY** with minor issues
**Critical Issues**: 0
**Warnings**: 2 (syntax error, "no valid session" message)
**Working Correctly**: ✅ All core functionality
**Next Steps**:
1. Fix syntax error (investigate route files)
2. Test mark-as-read functionality
3. Identify source of "no valid session" message
4. Proceed with conditional session logging (when ready)
---
**Generated**: 2026-01-01
**Status**: Ready for action items

View File

@ -1,96 +0,0 @@
# Log Search Instructions - Mark All As Read
**Purpose**: Find the exact error causing mark-all-as-read to fail
---
## 🔍 What to Do
After you do `rm -rf .next && npm run build && npm start` and test "mark all as read", please:
### Option 1: Search for Specific Markers
In your log output, search for these exact strings:
```bash
# Search for the adapter start marker
grep "===== markAllAsRead START =====" log
# Search for all notification service logs
grep "NOTIFICATION_SERVICE.*markAllAsRead" log
# Search for all leantime adapter logs
grep "LEANTIME_ADAPTER.*markAllAsRead" log
# Search for API logs
grep "NOTIFICATION_API.*Mark all as read" log
```
### Option 2: Provide Complete Log Snippet
When you test "mark all as read", copy the **COMPLETE** log output from:
- **Before**: 5-10 lines before `[NOTIFICATION_API] Mark all as read endpoint called`
- **After**: 50-100 lines after the failure
This will show us the full flow.
---
## 🎯 What We're Looking For
### Expected Log Sequence
```
[NOTIFICATION_API] Mark all as read endpoint called
[NOTIFICATION_API] Mark all as read - Processing { userId: '...', timestamp: '...' }
[NOTIFICATION_SERVICE] markAllAsRead called for user ...
[NOTIFICATION_SERVICE] Available adapters: leantime
[NOTIFICATION_SERVICE] Processing adapter: leantime
[NOTIFICATION_SERVICE] Adapter leantime is configured: true
[NOTIFICATION_SERVICE] Calling markAllAsRead on adapter leantime
[LEANTIME_ADAPTER] ===== markAllAsRead START ===== ← MUST APPEAR
[LEANTIME_ADAPTER] markAllAsRead called for userId: ...
[LEANTIME_ADAPTER] API URL: ...
[LEANTIME_ADAPTER] Has API Token: true
[LEANTIME_ADAPTER] markAllAsRead - User email: ...
[LEANTIME_ADAPTER] markAllAsRead - Leantime user ID: ...
[LEANTIME_ADAPTER] markAllAsRead - Request body: {...}
[LEANTIME_ADAPTER] markAllAsRead - Response status: XXX
[LEANTIME_ADAPTER] markAllAsRead - Response body: {...}
[LEANTIME_ADAPTER] markAllAsRead - API Error: {...} ← This will show the actual error
[NOTIFICATION_SERVICE] Adapter leantime markAllAsRead result: false
```
---
## ❓ Questions
1. **Do you see `[NOTIFICATION_SERVICE] markAllAsRead called for user`?**
- If NO → Service layer not being called
- If YES → Continue to next question
2. **Do you see `[NOTIFICATION_SERVICE] Calling markAllAsRead on adapter leantime`?**
- If NO → Adapter not being called
- If YES → Continue to next question
3. **Do you see `===== markAllAsRead START =====`?**
- If NO → Adapter method not executing (very strange!)
- If YES → We'll see the Leantime API error
---
## 🔧 Quick Test
After restart, run this command to see if the marker appears:
```bash
# Test mark all as read, then immediately:
tail -n 200 log | grep -A 50 "Mark all as read"
```
This searches the last 200 lines of the log for mark-all-as-read operations and prints 50 lines of context after each match.
---
**Status**: Enhanced logging with multiple output methods. Awaiting complete log output to identify the exact failure point.

View File

@ -1,140 +0,0 @@
# Mark All As Read - Cache Issue Analysis
**Date**: 2026-01-01
**Issue**: After marking all as read, list is empty but count still shows 66
---
## 🔍 Problem Analysis
### Current Flow
1. **User clicks "Mark all as read"**
2. **`markAllAsRead()` is called**
3. **Fetches notifications**: `this.getNotifications(userId, 1, 1000)`
- ⚠️ **PROBLEM**: This goes through `NotificationService.getNotifications()`
- ⚠️ **PROBLEM**: Which uses **CACHED** data if available
- ⚠️ **PROBLEM**: Cached notifications still have `isRead: false`
4. **Filters unread**: Gets 66 unread from cached data
5. **Marks each as read**: Calls Leantime API for each
6. **Invalidates cache**: After marking completes
7. **Count is fetched**: But might use stale cache or be fetched before invalidation
### The Issue
**Cache Race Condition**:
- `markAllAsRead` uses cached notifications (which are stale)
- Marks them as read in Leantime
- Invalidates cache
- But count might be fetched from cache **before** invalidation completes
- Or count cache might not be properly invalidated
**Why List is Empty**:
- After marking, all notifications are read
- List might filter to show only unread
- So list is empty (correct behavior)
- But count still shows 66 (stale cache)
---
## 🔧 Root Causes
### 1. Using Cached Data in `markAllAsRead`
**Current Code**:
```typescript
// In markAllAsRead
const allNotifications = await this.getNotifications(userId, 1, 1000);
```
**Problem**: `getNotifications()` uses cache, so we're working with stale data.
**Solution**: Fetch directly from Leantime API, bypassing cache.
---
### 2. Cache Invalidation Timing
**Current Flow**:
1. Mark all as read (uses cached data)
2. Invalidate cache
3. Count is fetched (might use stale cache if fetched too soon)
**Problem**: Race condition between invalidation and count fetch.
**Solution**:
- Invalidate cache **before** marking (or fetch fresh data)
- Force immediate count refresh after marking
- Add delay before count fetch to ensure cache is cleared
---
### 3. Count Cache Not Properly Invalidated
**Current Code**:
```typescript
if (success) {
await this.invalidateCache(userId);
}
```
**Problem**: If `markAllAsRead` fails partially, cache might not be invalidated.
**Solution**: Always invalidate cache, even on partial success.
---
## ✅ Recommended Fixes
### Fix 1: Bypass Cache in `markAllAsRead`
**Change**: Fetch notifications directly from Leantime API, not through cached service.
**Implementation**:
- Add a method to fetch notifications directly from adapter (bypassing cache)
- Or add a `forceRefresh` parameter to `getNotifications`
- Or fetch directly in `markAllAsRead` using Leantime API
### Fix 2: Always Invalidate Cache
**Change**: Invalidate cache even if some notifications fail to mark.
**Implementation**:
- Invalidate cache if **any** notifications were successfully marked
- Not just if **all** succeeded
### Fix 3: Force Fresh Count After Marking
**Change**: After marking, force an immediate fresh count fetch.
**Implementation**:
- After `markAllAsRead` completes, immediately call `getNotificationCount()` with cache bypass
- Or add a delay before count fetch to ensure cache is cleared
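A sketch combining Fixes 1-3; the adapter interface and the cache hook are assumptions and would need to match the real `NotificationService`:
```typescript
interface NotificationAdapter {
  fetchNotificationsDirect(userId: string): Promise<{ id: string; isRead: boolean }[]>; // assumed method
  markAsRead(userId: string, notificationId: string): Promise<boolean>; // assumed method
}

async function markAllAsReadFresh(
  adapter: NotificationAdapter,
  invalidateCache: (userId: string) => Promise<void>, // assumed cache hook
  userId: string
): Promise<boolean> {
  // Fix 1: fetch fresh data straight from the adapter, bypassing the cached service layer
  const fresh = await adapter.fetchNotificationsDirect(userId);
  const unread = fresh.filter((n) => !n.isRead);

  const results = await Promise.all(unread.map((n) => adapter.markAsRead(userId, n.id)));
  const anySucceeded = results.some(Boolean);

  // Fix 2: invalidate as soon as anything changed, not only on full success
  if (anySucceeded) await invalidateCache(userId);

  // Fix 3: the caller should then fetch a fresh count (cache bypass) before updating the UI
  return results.every(Boolean);
}
```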
---
## 📊 Expected Behavior After Fixes
### After Mark All As Read
**Before**:
- List: Empty (all read) ✅
- Count: 66 (stale cache) ❌
**After**:
- List: Empty (all read) ✅
- Count: 0 (fresh data) ✅
---
## 🎯 Next Steps
1. **Fix cache usage in `markAllAsRead`**: Fetch fresh data, not cached
2. **Improve cache invalidation**: Always invalidate, even on partial success
3. **Force count refresh**: Immediately fetch fresh count after marking
4. **Test**: Verify count updates correctly after marking
---
**Status**: Analysis complete. Ready to implement fixes.

View File

@ -1,132 +0,0 @@
# Mark All As Read - Diagnostic Guide
**Issue**: Adapter returns `false` but no detailed logs appear
---
## 🔍 Current Situation
**What We See**:
```
[NOTIFICATION_SERVICE] Adapter leantime markAllAsRead result: false
[NOTIFICATION_SERVICE] markAllAsRead results: [ false ]
[NOTIFICATION_SERVICE] markAllAsRead overall success: false
```
**What's Missing**:
- `[NOTIFICATION_SERVICE] markAllAsRead called for user ...`
- `[NOTIFICATION_SERVICE] Processing adapter: leantime`
- `[NOTIFICATION_SERVICE] Calling markAllAsRead on adapter leantime`
- `[LEANTIME_ADAPTER] ===== markAllAsRead START =====` ← **NEW: Very prominent marker**
---
## 🚨 Possible Causes
### 1. Server Not Fully Restarted
**Solution**: Do a **hard restart**:
```bash
# Stop completely
sudo npm stop
# Or kill the process
sudo pkill -f "next start"
# Wait a few seconds
sleep 3
# Start fresh
sudo npm start
```
### 2. Next.js Build Cache
**Solution**: Clear cache and rebuild:
```bash
rm -rf .next
sudo npm run build
sudo npm start
```
### 3. Log Buffering/Filtering
**Solution**: Check if logs are being filtered. Look for ALL logs around the mark-all-as-read operation.
### 4. Code Not Deployed
**Solution**: Verify the file was saved and the server picked it up.
---
## ✅ What to Look For After Restart
### Expected Complete Log Flow
When you click "Mark all as read", you should see **ALL** of these logs:
```
[NOTIFICATION_API] Mark all as read endpoint called
[NOTIFICATION_API] Mark all as read - Processing { userId: '...', timestamp: '...' }
[NOTIFICATION_SERVICE] markAllAsRead called for user ...
[NOTIFICATION_SERVICE] Available adapters: leantime
[NOTIFICATION_SERVICE] Processing adapter: leantime
[NOTIFICATION_SERVICE] Adapter leantime is configured: true
[NOTIFICATION_SERVICE] Calling markAllAsRead on adapter leantime
[LEANTIME_ADAPTER] ===== markAllAsRead START ===== ← VERY PROMINENT
[LEANTIME_ADAPTER] markAllAsRead called for userId: ...
[LEANTIME_ADAPTER] API URL: ...
[LEANTIME_ADAPTER] Has API Token: true
[LEANTIME_ADAPTER] markAllAsRead - User email: ...
[LEANTIME_ADAPTER] markAllAsRead - Leantime user ID: ...
[LEANTIME_ADAPTER] markAllAsRead - Request body: {...}
[LEANTIME_ADAPTER] markAllAsRead - API URL: ...
[LEANTIME_ADAPTER] markAllAsRead - Response status: XXX
[LEANTIME_ADAPTER] markAllAsRead - Response body: {...}
[LEANTIME_ADAPTER] markAllAsRead - Parsed response: {...}
[LEANTIME_ADAPTER] markAllAsRead - API Error: {...} ← This will show the actual error
[LEANTIME_ADAPTER] ===== markAllAsRead END (success: false) =====
[NOTIFICATION_SERVICE] Adapter leantime markAllAsRead result: false
[NOTIFICATION_SERVICE] markAllAsRead results: [ false ]
[NOTIFICATION_SERVICE] markAllAsRead overall success: false
[NOTIFICATION_SERVICE] Not invalidating caches - operation failed
[NOTIFICATION_API] Mark all as read - Failed { userId: '...', duration: '...ms' }
```
---
## 🎯 Critical Check
**After restarting**, search your logs for:
```
===== markAllAsRead START =====
```
If you **DON'T** see this line, the adapter method is **NOT** being called, which means:
- Server not restarted properly
- Code not deployed
- Different code path being used
If you **DO** see this line, we'll have all the details we need to fix the Leantime API call.
---
## 📋 Action Items
1. ✅ **Hard Restart Server** (stop completely, wait, start)
2. ✅ **Test Mark All As Read**
3. ✅ **Search logs for `===== markAllAsRead START =====`**
4. ✅ **Share ALL logs** from the mark-all-as-read operation
5. ✅ **Look for `API Error:`** in the logs (this will show what Leantime is returning)
---
## 🔧 If Logs Still Don't Appear
If after restart you still don't see the `===== markAllAsRead START =====` log:
1. **Verify file was saved**: Check `lib/services/notifications/leantime-adapter.ts` line 220-224
2. **Check for syntax errors**: Run `npm run build` or check for TypeScript errors
3. **Verify server is using the file**: Check if there are multiple versions or build artifacts
4. **Check log output**: Make sure you're looking at the right log file/stream
---
**Status**: Enhanced logging with prominent markers added. Awaiting server restart and test.
**Next**: After restart, the `===== markAllAsRead START =====` marker will confirm the method is being called, and we'll see the exact Leantime API error.

View File

@ -1,153 +0,0 @@
# Microsoft OAuth Token Management Analysis
## Current Implementation
### Token Storage Locations
1. **Redis Cache** (Primary for OAuth tokens)
- **Location**: `lib/redis.ts` → `cacheEmailCredentials()`
- **TTL**: 24 hours (`TTL.CREDENTIALS = 60 * 60 * 24`)
- **Stored**: `accessToken`, `refreshToken`, `tokenExpiry`, `useOAuth`
- **Key Format**: `email:credentials:${userId}:${accountId}`
2. **Prisma Database** (Schema has fields but NOT used for OAuth tokens)
- **Location**: `prisma/schema.prisma` → `MailCredentials` model
- **Fields Available**: `refresh_token`, `access_token`, `token_expiry`, `use_oauth`
- **Current Status**: ❌ **Tokens are NOT saved to Prisma** (only Redis)
- **Code Comment**: "OAuth fields don't exist" (but they DO exist in schema!)
### Token Refresh Flow
**Location**: `lib/services/token-refresh.ts` → `ensureFreshToken()`
1. Checks Redis for credentials
2. Validates token expiry (5-minute buffer)
3. Refreshes token if needed via Microsoft API
4. **Updates Redis only** (not Prisma)
5. Returns new access token
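For context, a sketch of the underlying Microsoft identity platform v2.0 refresh request; the environment variable names are assumptions, and the actual helper in `lib/services/token-refresh.ts` may differ:
```typescript
// Exchange a refresh token for a new access token (and possibly a rotated refresh token)
async function refreshMicrosoftToken(refreshToken: string) {
  const res = await fetch('https://login.microsoftonline.com/common/oauth2/v2.0/token', {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: new URLSearchParams({
      client_id: process.env.MICROSOFT_CLIENT_ID ?? '',         // assumed env var name
      client_secret: process.env.MICROSOFT_CLIENT_SECRET ?? '', // assumed env var name
      grant_type: 'refresh_token',
      refresh_token: refreshToken,
      scope: 'offline_access https://outlook.office.com/IMAP.AccessAsUser.All https://outlook.office.com/SMTP.Send',
    }),
  });
  if (!res.ok) throw new Error(`Token refresh failed: ${res.status}`);
  // Response contains access_token, expires_in, and sometimes a new refresh_token
  return res.json() as Promise<{ access_token: string; expires_in: number; refresh_token?: string }>;
}
```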
### Issues Identified
#### 🔴 Critical Issue #1: Refresh Tokens Not Persisted to Database
**Problem**:
- Refresh tokens are only stored in Redis with 24-hour TTL
- If Redis is cleared, restarted, or TTL expires, refresh tokens are **permanently lost**
- Microsoft refresh tokens can last up to **90 days** (or indefinitely with `offline_access` scope)
- Users would need to re-authenticate if Redis data is lost
**Impact**:
- ❌ Not viable for long-term production use
- ❌ Data loss risk on Redis restarts
- ❌ No backup/recovery mechanism
#### 🟡 Issue #2: Token Refresh Doesn't Update Database
**Problem**:
- When tokens are refreshed, only Redis is updated
- Prisma database still has old/expired tokens (if any)
- Schema has the fields but they're never populated
**Impact**:
- ⚠️ Inconsistency between Redis and Database
- ⚠️ Can't recover from Redis cache loss
#### 🟡 Issue #3: Missing Refresh Token in Logs
From your logs:
```
hasRefreshToken: false
```
This suggests the refresh token might not be properly saved or retrieved.
### Microsoft OAuth Token Lifespan
- **Access Token**: ~1 hour (3600 seconds)
- **Refresh Token**: Up to 90 days (with `offline_access` scope)
- **Token Refresh**: Returns new access token, may return new refresh token
### Required Scopes
Current implementation uses:
```typescript
const REQUIRED_SCOPES = [
'offline_access', // ✅ Required for long-lived refresh tokens
'https://outlook.office.com/IMAP.AccessAsUser.All',
'https://outlook.office.com/SMTP.Send'
].join(' ');
```
`offline_access` is included - this is correct for long-term use.
## Recommendations
### ✅ Fix #1: Persist Refresh Tokens to Prisma
**Why**: Refresh tokens are critical for long-term access and should be persisted to database.
**Implementation**:
1. Save `refresh_token` to Prisma `MailCredentials.refresh_token` field
2. Update `token_expiry` when tokens are refreshed
3. Keep access tokens in Redis (short-lived, can be regenerated)
4. Use Prisma as source of truth for refresh tokens
### ✅ Fix #2: Update Database on Token Refresh
**Why**: Keep database in sync with refreshed tokens.
**Implementation**:
1. After refreshing tokens, update Prisma `MailCredentials` record
2. Update `access_token` and `token_expiry` fields
3. Update `refresh_token` if Microsoft returns a new one
### ✅ Fix #3: Fallback to Database if Redis Missing
**Why**: Recover from Redis cache loss.
**Implementation**:
1. If Redis cache is empty, check Prisma for refresh token
2. Use Prisma refresh token to get new access token
3. Re-populate Redis cache
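A sketch of Fixes #1 and #2, assuming the Prisma client accessor `prisma.mailCredentials`, the snake_case fields listed above, and an illustrative lookup key:
```typescript
import { prisma } from '@/lib/prisma'; // assumed import path for the Prisma client

// Persist refreshed OAuth tokens so the database stays the source of truth
async function persistRefreshedTokens(
  credentialsId: string,
  accessToken: string,
  tokenExpiry: Date,
  newRefreshToken?: string // Microsoft may or may not rotate the refresh token
) {
  await prisma.mailCredentials.update({
    where: { id: credentialsId }, // assumed primary key
    data: {
      access_token: accessToken,
      token_expiry: tokenExpiry,
      use_oauth: true,
      // Only overwrite the stored refresh token if a new one was returned
      ...(newRefreshToken ? { refresh_token: newRefreshToken } : {}),
    },
  });
}
```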
## Long-Term Viability Assessment
### Current State: ⚠️ **NOT VIABLE** for long-term production
**Reasons**:
1. ❌ Refresh tokens only in volatile Redis cache
2. ❌ No persistence mechanism
3. ❌ Risk of data loss on Redis restart
4. ❌ No recovery mechanism
### After Fixes: ✅ **VIABLE** for long-term production
**With recommended fixes**:
1. ✅ Refresh tokens persisted to database
2. ✅ Redis used for fast access token retrieval
3. ✅ Database as source of truth
4. ✅ Recovery mechanism in place
## Token Storage Strategy (Recommended)
### Access Tokens
- **Storage**: Redis (fast, short-lived)
- **TTL**: 1 hour (matches Microsoft token expiry)
- **Purpose**: Fast IMAP/SMTP authentication
### Refresh Tokens
- **Storage**: Prisma Database (persistent, long-term)
- **TTL**: None (stored indefinitely until revoked)
- **Purpose**: Long-term access, token renewal
### Token Expiry
- **Storage**: Both Redis and Prisma
- **Purpose**: Know when to refresh tokens
## Implementation Priority
1. **HIGH**: Persist refresh tokens to Prisma
2. **HIGH**: Update Prisma on token refresh
3. **MEDIUM**: Add fallback to database if Redis missing
4. **LOW**: Add token encryption at rest (if required by compliance)

View File

@ -1,130 +0,0 @@
# Microsoft OAuth Token Management - Fixes Applied
## Issues Fixed
### ✅ Fix #1: Refresh Tokens Now Persisted to Prisma Database
**Problem**: Refresh tokens were only stored in Redis (24-hour TTL), risking permanent loss.
**Solution**:
- Refresh tokens are now saved to `MailCredentials.refresh_token` in Prisma
- Access tokens and expiry also persisted to database
- Database acts as source of truth for long-term token storage
**Files Modified**:
- `lib/services/email-service.ts` - `saveUserEmailCredentials()` now saves OAuth tokens to Prisma
### ✅ Fix #2: Database Updated on Token Refresh
**Problem**: When tokens were refreshed, only Redis was updated, leaving database stale.
**Solution**:
- Token refresh now updates both Redis AND Prisma
- New refresh tokens (if provided by Microsoft) are persisted
- Token expiry timestamp updated in database
**Files Modified**:
- `lib/services/token-refresh.ts` - `ensureFreshToken()` now updates Prisma after refresh
### ✅ Fix #3: Fallback to Database if Redis Missing
**Problem**: If Redis cache was empty, system couldn't recover refresh tokens.
**Solution**:
- If Redis cache miss, system checks Prisma database
- Retrieves refresh token from database
- Re-populates Redis cache for future use
**Files Modified**:
- `lib/services/token-refresh.ts` - Added database fallback logic
### ✅ Fix #4: OAuth Fields Retrieved from Database
**Problem**: When loading credentials from database, OAuth fields were ignored.
**Solution**:
- Database queries now include OAuth fields (`access_token`, `refresh_token`, `token_expiry`, `use_oauth`)
- Credentials object properly populated with OAuth data from database
**Files Modified**:
- `lib/services/email-service.ts` - `getImapConnection()` now includes OAuth fields from database
## Token Storage Strategy (Current)
### Access Tokens
- **Primary**: Redis (fast access, 24-hour TTL)
- **Backup**: Prisma Database (persisted)
- **Lifespan**: ~1 hour (Microsoft default)
### Refresh Tokens
- **Primary**: Prisma Database (persistent, long-term)
- **Cache**: Redis (24-hour TTL, for fast access)
- **Lifespan**: Up to 90 days (with `offline_access` scope)
### Token Expiry
- **Storage**: Both Redis and Prisma
- **Purpose**: Determine when to refresh tokens
## Long-Term Viability
### ✅ NOW VIABLE for Production
**Improvements**:
1. ✅ Refresh tokens persisted to database
2. ✅ Database updated on token refresh
3. ✅ Fallback mechanism if Redis fails
4. ✅ No data loss on Redis restart
5. ✅ Recovery mechanism in place
## What Happens Now
### When Adding Microsoft Account:
1. OAuth tokens saved to **both** Redis and Prisma
2. Refresh token stored in database for long-term access
3. Access token cached in Redis for fast retrieval
### When Token Expires:
1. System checks Redis first (fast path)
2. If Redis miss, checks Prisma database (fallback)
3. Uses refresh token to get new access token
4. Updates **both** Redis and Prisma with new tokens
5. Continues normal operation
### If Redis is Cleared:
1. System detects Redis cache miss
2. Retrieves refresh token from Prisma database
3. Gets new access token using refresh token
4. Re-populates Redis cache
5. **No user action required**
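A sketch of that recovery path; `cacheEmailCredentials()` is the Redis helper in `lib/redis.ts`, but its signature and the lookup key used here are assumptions:
```typescript
import { prisma } from '@/lib/prisma';               // assumed import path
import { cacheEmailCredentials } from '@/lib/redis'; // real helper, assumed signature

// Redis-miss recovery: rebuild the cache from the persisted Prisma record
async function recoverCredentials(userId: string, accountId: string) {
  const record = await prisma.mailCredentials.findUnique({
    where: { id: accountId }, // assumed lookup key
  });
  if (!record?.refresh_token) return null; // nothing persisted → user must re-authenticate

  // Re-populate Redis so subsequent calls take the fast path again
  await cacheEmailCredentials(userId, accountId, {
    accessToken: record.access_token,
    refreshToken: record.refresh_token,
    tokenExpiry: record.token_expiry,
    useOAuth: record.use_oauth,
  });
  return record;
}
```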
## Testing Recommendations
1. **Test Token Refresh**:
- Wait for access token to expire (~1 hour)
- Verify system automatically refreshes
- Check both Redis and Prisma are updated
2. **Test Redis Failure**:
- Clear Redis cache
- Try to access email
- Verify system recovers from database
3. **Test Long-Term Access**:
- Wait several days
- Verify refresh token still works
- Check no re-authentication required
## Monitoring
Watch for these log messages:
- ✅ `Token for ${email} persisted to Prisma database` - Token saved successfully
- ✅ `Recovered credentials from Prisma and cached in Redis` - Fallback working
- ⚠️ `Error persisting tokens to database` - Database update failed (check logs)
## Next Steps
1. **Monitor**: Watch logs for token refresh operations
2. **Verify**: Check Prisma database has `refresh_token` values
3. **Test**: Verify email access works after Redis restart
4. **Optional**: Consider encrypting tokens at rest (if compliance requires)

View File

@ -1,930 +0,0 @@
# Analyse Complète : Pages Missions et Centrale - Workflow Complet
## 📋 Table des Matières
1. [Vue d'ensemble](#vue-densemble)
2. [Architecture des Pages](#architecture-des-pages)
3. [Workflow de Navigation](#workflow-de-navigation)
4. [Workflow de Création de Mission](#workflow-de-création-de-mission)
5. [Workflow de Consultation](#workflow-de-consultation)
6. [API Routes](#api-routes)
7. [Base de Données](#base-de-données)
8. [Intégrations Externes](#intégrations-externes)
9. [Stockage de Fichiers](#stockage-de-fichiers)
10. [Composants Réutilisables](#composants-réutilisables)
---
## 🎯 Vue d'ensemble
### Page "Centrale"
- **Route**: `/missions`
- **Nom dans le menu**: "Centrale"
- **Accès**: Rôles `entrepreneurship` ou `admin` (défini dans `components/main-nav.tsx`)
- **Description**: Centre d'Administration et de Pilotage (CAP) - Interface principale pour gérer les missions
### Page "Missions"
- **Route principale**: `/missions`
- **Sous-routes**:
- `/missions` - Liste des missions de l'utilisateur
- `/missions/new` - Création d'une nouvelle mission
- `/missions/[missionId]` - Détails d'une mission
- `/missions/[missionId]/edit` - Édition d'une mission
### Page "Mission Tab" (Tableau des Missions)
- **Route**: `/mission-tab`
- **Description**: Vue publique de toutes les missions disponibles
- **Sous-routes**:
- `/mission-tab` - Liste de toutes les missions
- `/mission-tab/[missionId]` - Détails d'une mission (vue publique)
---
## 🏗️ Architecture des Pages
### 1. Layout Principal - Missions (`app/missions/layout.tsx`)
**Structure**:
```
┌─────────────────────────────────────────┐
│ Sidebar (CAP) - Fond rose clair │
│ ┌───────────────────────────────────┐ │
│ │ CAP │ │
│ │ Centre d'Administration et de │ │
│ │ Pilotage │ │
│ └───────────────────────────────────┘ │
│ • Mes Missions (/missions) │
│ • Nouvelle Mission (/missions/new) │
└─────────────────────────────────────────┘
│ Contenu Principal (children) │
└─────────────────────────────────────────┘
```
**Fonctionnalités**:
- Sidebar fixe avec navigation
- Fond rose clair (`bg-pink-50`) pour la sidebar
- Fond blanc pour le contenu principal
- Navigation active highlightée
### 2. Page Liste des Missions (`app/missions/page.tsx`)
**Fonctionnalités**:
- Affichage en grille (responsive: 1/2/3 colonnes)
- Recherche par nom, niveau, type, ODD scope
- Filtrage en temps réel
- Cartes de mission avec:
- Logo (ou initiales si pas de logo)
- Nom de la mission
- Badge niveau (A/B/C/S) avec couleurs
- Icône ODD (Objectifs de Développement Durable)
- Services associés
- Description (intention) tronquée
- Date de création
- Bouton "Voir détails"
**API utilisée**: `GET /api/missions`
- Retourne uniquement les missions où l'utilisateur est:
- Créateur (`creatorId`)
- Ou membre (`missionUsers`)
### 3. Page Création de Mission (`app/missions/new/page.tsx`)
**Composant principal**: `MissionsAdminPanel`
- Formulaire multi-onglets (5 onglets)
- Navigation séquentielle avec boutons Précédent/Suivant
### 4. Page Détails Mission (`app/missions/[missionId]/page.tsx`)
**Fonctionnalités**:
- Affichage complet des informations
- Logo de la mission
- Grille d'informations (Type, Donneur d'ordre, Durée, Niveau, Participation, ODD)
- Description complète
- Liste des documents/attachments
- Profils recherchés
- Services
- Bouton de suppression (si créateur ou admin)
**API utilisée**: `GET /api/missions/[missionId]`
### 5. Page Mission Tab (`app/mission-tab/page.tsx`)
**Différences avec `/missions`**:
- Affiche **TOUTES** les missions (pas de filtre utilisateur)
- API utilisée: `GET /api/missions/all`
- Vue publique pour découvrir toutes les missions disponibles
---
## 🔄 Workflow de Navigation
### Accès à la Centrale
```
1. Utilisateur connecté avec rôle "entrepreneurship" ou "admin"
2. Menu déroulant utilisateur (MainNav)
3. Clic sur "Centrale" (href: '/missions')
4. Redirection vers /missions
5. Layout Missions s'affiche avec sidebar CAP
6. Page Liste des Missions (/missions/page.tsx)
```
### Navigation dans la Centrale
```
┌─────────────────────────────────────────┐
│ Sidebar CAP │
│ ├─ Mes Missions (/missions) │
│ └─ Nouvelle Mission (/missions/new) │
└─────────────────────────────────────────┘
│ │
│ │
▼ ▼
┌─────────────────┐ ┌──────────────────┐
│ Liste Missions │ │ Création Mission │
│ │ │ │
│ [Carte Mission] │ │ [Formulaire] │
│ └─► Détails │ │ │
└─────────────────┘ └──────────────────┘
┌─────────────────┐
│ Détails Mission │
│ │
│ [Éditer] │
│ [Supprimer] │
└─────────────────┘
```
---
## 🚀 Workflow de Création de Mission
### Étape 1: Accès au Formulaire
```
User → /missions/new → MissionsAdminPanel
```
### Étape 2: Formulaire Multi-Onglets
**Onglet 1: General**
- Nom de la mission (requis)
- Logo (upload)
- ODD scope (requis) - Sélection parmi 17 ODD
- Niveau (requis) - A/B/C/S
- Intention (requis) - Description avec éditeur de texte
**Onglet 2: Details**
- Type de mission (requis) - Remote/Onsite/Hybrid
- Donneur d'ordre (requis) - Individu/ONG/Start-ups
- Projection (requis) - Short/Medium/Long term
- Services - Checkboxes (Gite, ArtLab, Calcul)
- Participation (requis) - Volontaire/Cooptation
- Profils - Checkboxes (DataIntelligence, Expression, Mediation, Investigation, Coding, Lean)
**Onglet 3: Attachments**
- Upload de fichiers (PDF, DOC, DOCX, XLS, XLSX, JPG, JPEG, PNG)
- Liste des fichiers sélectionnés
- Upload immédiat vers Minio (bucket 'missions')
**Onglet 4: Skills**
- Liste de compétences (non fonctionnel actuellement - placeholders)
**Onglet 5: Membres**
- **Les Gardiens de l'Intention** (3 gardiens requis):
- Gardien du Temps
- Gardien de la Parole
- Gardien de la Mémoire
- **Volontaires** (optionnel)
- Recherche d'utilisateurs ou groupes
- Assignation de rôles
### Étape 3: Validation et Soumission
**Validation**:
```typescript
const requiredFields = {
name: !!missionData.name,
oddScope: Array.isArray(missionData.oddScope) && missionData.oddScope.length > 0,
niveau: !!missionData.niveau,
intention: !!missionData.intention,
missionType: !!missionData.missionType,
donneurDOrdre: !!missionData.donneurDOrdre,
projection: !!missionData.projection,
participation: !!missionData.participation,
gardiens: gardienDuTemps !== null &&
gardienDeLaParole !== null &&
gardienDeLaMemoire !== null
}
```
**Soumission**:
```typescript
POST /api/missions
Body: {
name, oddScope, niveau, intention, missionType,
donneurDOrdre, projection, services, profils,
participation, guardians, volunteers, logo, attachments
}
```
### Étape 4: Traitement Backend
**Séquence d'exécution**:
1. **Création de la mission en base de données**
```typescript
prisma.mission.create({
data: { name, oddScope, niveau, intention, ... }
})
```
2. **Création des MissionUsers (gardiens + volontaires)**
```typescript
prisma.missionUser.createMany({
data: [
{ missionId, userId, role: 'gardien-temps' },
{ missionId, userId, role: 'gardien-parole' },
{ missionId, userId, role: 'gardien-memoire' },
{ missionId, userId, role: 'volontaire' }, // pour chaque volontaire
]
})
```
3. **Upload du logo vers Minio**
- Path: `missions/{missionId}/logo{extension}`
- Bucket: `missions`
- Mise à jour du champ `logo` dans la mission
4. **Upload des attachments vers Minio**
- Path: `missions/{missionId}/attachments/{filename}`
- Création des enregistrements `Attachment` en base
5. **Vérification des fichiers dans Minio**
- Vérifie que tous les fichiers sont bien présents avant de continuer
6. **Déclenchement du workflow N8N**
```typescript
n8nService.triggerMissionCreation({
...missionData,
creatorId,
logoPath,
config: { N8N_API_KEY, MISSION_API_URL }
})
```
7. **Intégrations externes (via N8N)**:
- Création projet Leantime (si applicable)
- Création collection Outline (si applicable)
- Création canal RocketChat (si applicable)
- Création repository Gitea (si applicable)
- Création projet Penpot (si applicable)
8. **Retour succès/erreur**
- Si succès: Redirection vers `/missions`
- Si erreur: Nettoyage des fichiers uploadés + message d'erreur
---
## 👀 Workflow de Consultation
### Consultation Liste des Missions
**Route**: `/missions` ou `/mission-tab`
**Flux**:
```
1. Chargement de la page
2. useEffect → fetch('/api/missions') ou fetch('/api/missions/all')
3. Affichage du loader
4. Réception des données
5. Transformation des données:
- Ajout des logoUrl (si logo existe)
- Formatage des dates
- Calcul des couleurs de badges
- Extraction des infos ODD
6. Filtrage par terme de recherche (si présent)
7. Affichage en grille
```
**Recherche**:
- Filtre en temps réel sur: nom, niveau, type, ODD scope
- Pas de requête API supplémentaire (filtrage côté client)
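Esquisse (hypothétique) de ce filtrage côté client ; les noms de champs reprennent ceux de la réponse de `GET /api/missions` décrite plus bas :
```typescript
// Filtrage côté client : aucune requête API supplémentaire n'est nécessaire
type MissionCard = { name: string; niveau: string; missionType: string; oddScope: string[] };

function filterMissions(missions: MissionCard[], searchTerm: string): MissionCard[] {
  const term = searchTerm.trim().toLowerCase();
  if (!term) return missions;
  return missions.filter((m) =>
    [m.name, m.niveau, m.missionType, ...m.oddScope]
      .some((value) => value.toLowerCase().includes(term))
  );
}
```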
### Consultation Détails Mission
**Route**: `/missions/[missionId]` ou `/mission-tab/[missionId]`
**Flux**:
```
1. Chargement de la page
2. Récupération du missionId depuis les params
3. useEffect → fetch(`/api/missions/${missionId}`)
4. Affichage du loader
5. Réception des données complètes:
- Mission avec tous les champs
- Creator (id, email)
- MissionUsers (avec user details)
- Attachments (avec publicUrl)
6. Transformation:
- Ajout des logoUrl
- Formatage des dates
- Labels pour les types/niveaux
- URLs publiques pour les attachments
7. Affichage des sections:
- Header avec nom et logo
- Grille d'informations
- Description
- Documents
- Profils recherchés
- Services
- Actions (Éditer/Supprimer)
```
---
## 🔌 API Routes
### 1. `GET /api/missions`
**Fichier**: `app/api/missions/route.ts`
**Fonctionnalité**: Liste les missions de l'utilisateur connecté
**Filtres**:
- `limit` (default: 10)
- `offset` (default: 0)
- `search` (recherche dans name et intention)
- `name` (filtre exact)
**Where Clause**:
```typescript
{
OR: [
{ creatorId: userId },
{ missionUsers: { some: { userId } } }
]
}
```
**Retour**:
```json
{
"missions": [
{
"id": "...",
"name": "...",
"logo": "missions/{id}/logo.png",
"logoUrl": "/api/missions/image/missions/{id}/logo.png",
"oddScope": ["odd-3"],
"niveau": "a",
"missionType": "remote",
"projection": "short",
"services": ["Gite"],
"intention": "...",
"createdAt": "...",
"creator": { "id": "...", "email": "..." },
"missionUsers": [...],
"attachments": [...]
}
],
"pagination": {
"total": 10,
"offset": 0,
"limit": 10
}
}
```
### 2. `POST /api/missions`
**Fichier**: `app/api/missions/route.ts`
**Fonctionnalité**: Crée une nouvelle mission
**Body**:
```typescript
{
name: string;
oddScope: string[];
niveau?: string;
intention?: string;
missionType?: string;
donneurDOrdre?: string;
projection?: string;
services?: string[];
profils?: string[];
participation?: string;
guardians?: {
"gardien-temps": string;
"gardien-parole": string;
"gardien-memoire": string;
};
volunteers?: string[];
logo?: {
data: string; // base64
name?: string;
type?: string;
};
attachments?: Array<{
data: string; // base64
name?: string;
type?: string;
}>;
}
```
**Retour**:
```json
{
"success": true,
"mission": { ... },
"message": "Mission created successfully with all integrations"
}
```
### 3. `GET /api/missions/[missionId]`
**Fichier**: `app/api/missions/[missionId]/route.ts`
**Fonctionnalité**: Récupère les détails d'une mission
**Contrôle d'accès**:
- Utilisateur doit être créateur OU membre de la mission
**Retour**: Mission complète avec relations
### 4. `PUT /api/missions/[missionId]`
**Fichier**: `app/api/missions/[missionId]/route.ts`
**Fonctionnalité**: Met à jour une mission
**Contrôle d'accès**:
- Créateur OU gardien-temps/gardien-parole
**Body**: Même structure que POST (tous les champs optionnels)
### 5. `DELETE /api/missions/[missionId]`
**Fichier**: `app/api/missions/[missionId]/route.ts`
**Fonctionnalité**: Supprime une mission
**Contrôle d'accès**:
- Créateur OU admin uniquement
**Actions**:
- Suppression du logo dans Minio
- Suppression de la mission en base (cascade sur MissionUsers et Attachments)
- TODO: Rollback N8N (non implémenté)
### 6. `GET /api/missions/all`
**Fichier**: `app/api/missions/all/route.ts`
**Fonctionnalité**: Liste TOUTES les missions (pas de filtre utilisateur)
**Différences avec `/api/missions`**:
- Pas de filtre par utilisateur
- Retourne toutes les missions publiques
- Utilisé par `/mission-tab`
### 7. `GET /api/missions/image/[...path]`
**Fichier**: `app/api/missions/image/[...path]/route.ts`
**Fonctionnalité**: Sert les images (logos et attachments) depuis Minio
**Path**: `missions/{missionId}/logo.png` ou `missions/{missionId}/attachments/{filename}`
### 8. `POST /api/missions/upload`
**Fichier**: `app/api/missions/upload/route.ts`
**Fonctionnalité**: Upload de fichiers (logo ou attachments)
### 9. `GET /api/missions/[missionId]/attachments`
**Fichier**: `app/api/missions/[missionId]/attachments/route.ts`
**Fonctionnalité**: Liste les attachments d'une mission
### 10. `POST /api/missions/[missionId]/attachments`
**Fichier**: `app/api/missions/[missionId]/attachments/route.ts`
**Fonctionnalité**: Ajoute un attachment à une mission existante
### 11. `DELETE /api/missions/[missionId]/attachments/[attachmentId]`
**Fichier**: `app/api/missions/[missionId]/attachments/[attachmentId]/route.ts`
**Fonctionnalité**: Supprime un attachment
---
## 🗄️ Base de Données
### Modèle Mission (`prisma/schema.prisma`)
```prisma
model Mission {
id String @id @default(uuid())
name String
logo String? // Path dans Minio
oddScope String[] // Catégories ODD
niveau String // A/B/C/S
intention String // Description
missionType String // remote/onsite/hybrid
donneurDOrdre String // individual/group/organization
projection String // short/medium/long
services String[] // ["Gite", "ArtLab", "Calcul"]
participation String? // volontaire/cooptation
profils String[] // ["DataIntelligence", ...]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
creator User @relation(fields: [creatorId], references: [id])
creatorId String
attachments Attachment[]
missionUsers MissionUser[]
// Intégrations externes
leantimeProjectId String?
outlineCollectionId String?
rocketChatChannelId String?
giteaRepositoryUrl String?
penpotProjectId String?
@@index([creatorId])
}
```
### Modèle MissionUser
```prisma
model MissionUser {
id String @id @default(uuid())
role String // 'gardien-temps', 'gardien-parole', 'gardien-memoire', 'volontaire'
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
mission Mission @relation(fields: [missionId], references: [id])
missionId String
user User @relation(fields: [userId], references: [id])
userId String
@@unique([missionId, userId, role])
@@index([missionId])
@@index([userId])
}
```
### Modèle Attachment
```prisma
model Attachment {
id String @id @default(uuid())
filename String
filePath String // Path dans Minio: missions/{missionId}/attachments/{filename}
fileType String // MIME type
fileSize Int
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
mission Mission @relation(fields: [missionId], references: [id])
missionId String
uploader User @relation(fields: [uploaderId], references: [id])
uploaderId String
@@index([missionId])
@@index([uploaderId])
}
```
---
## 🔗 Intégrations Externes
### Service N8N (`lib/services/n8n-service.ts`)
**Webhook URL**: `https://brain.slm-lab.net/webhook/mission-created`
**Données envoyées**:
```typescript
{
name, oddScope, niveau, intention, missionType,
donneurDOrdre, projection, services, participation,
profils, guardians, volunteers, creatorId,
config: {
N8N_API_KEY,
MISSION_API_URL
}
}
```
**Workflow N8N déclenche**:
1. Création projet Leantime (si applicable)
2. Création collection Outline (si applicable)
3. Création canal RocketChat (si applicable)
4. Création repository Gitea (si applicable)
5. Création projet Penpot (si applicable)
**Retour**:
```typescript
{
success: boolean;
results?: {
leantimeProjectId?: string;
outlineCollectionId?: string;
rocketChatChannelId?: string;
giteaRepositoryUrl?: string;
penpotProjectId?: string;
failedServices?: {
gitRepo?: boolean;
leantimeProject?: boolean;
docCollection?: boolean;
rocketChatChannel?: boolean;
}
};
error?: string;
}
```
**Rollback** (non implémenté):
- Webhook: `https://brain.slm-lab.net/webhook/mission-rollback`
- Appelé lors de la suppression d'une mission
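Esquisse (hypothétique) de l'appel au webhook décrit ci-dessus, tel qu'il pourrait être fait par `n8nService.triggerMissionCreation()` ; les noms des variables d'environnement sont des suppositions :
```typescript
// Esquisse : déclenchement du workflow N8N à la création d'une mission
async function triggerMissionCreation(payload: Record<string, unknown>) {
  const res = await fetch('https://brain.slm-lab.net/webhook/mission-created', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      ...payload,
      config: {
        N8N_API_KEY: process.env.N8N_API_KEY,         // nom de variable supposé
        MISSION_API_URL: process.env.MISSION_API_URL, // nom de variable supposé
      },
    }),
  });
  if (!res.ok) {
    return { success: false, error: `N8N webhook failed: ${res.status}` };
  }
  return res.json(); // { success, results?, error? } selon le format décrit plus haut
}
```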
---
## 📦 Stockage de Fichiers
### Minio Configuration
**Endpoint**: `https://dome-api.slm-lab.net`
**Bucket**: `missions`
**Credentials**: Hardcodés dans `lib/mission-uploads.ts` (⚠️ à sécuriser)
### Structure des Chemins
**Logo**:
```
missions/{missionId}/logo{extension}
Exemple: missions/abc-123/logo.png
```
**Attachments**:
```
missions/{missionId}/attachments/{filename}
Exemple: missions/abc-123/attachments/document.pdf
```
### URLs Publiques
**Format**: `/api/missions/image/{path}`
**Exemples**:
- Logo: `/api/missions/image/missions/{missionId}/logo.png`
- Attachment: `/api/missions/image/missions/{missionId}/attachments/document.pdf`
### Fonctions Utilitaires (`lib/mission-uploads.ts`)
- `getMissionLogoPath()` - Génère le chemin du logo
- `getMissionAttachmentPath()` - Génère le chemin d'un attachment
- `uploadMissionLogo()` - Upload un logo vers Minio
- `uploadMissionAttachment()` - Upload un attachment vers Minio
- `deleteMissionLogo()` - Supprime un logo (TODO)
- `deleteMissionAttachment()` - Supprime un attachment
- `getMissionFileUrl()` - Construit l'URL publique
- `ensureMissionsPrefix()` - Normalise le chemin
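Esquisse (hypothétique) de deux de ces utilitaires, cohérente avec la structure de chemins et le format d'URL décrits ci-dessus ; l'implémentation réelle dans `lib/mission-uploads.ts` peut différer :
```typescript
// Normalise le chemin pour qu'il commence toujours par "missions/"
export function ensureMissionsPrefix(path: string): string {
  return path.startsWith('missions/') ? path : `missions/${path}`;
}

// Construit l'URL publique servie par /api/missions/image/[...path]
export function getMissionFileUrl(filePath: string): string {
  return `/api/missions/image/${ensureMissionsPrefix(filePath)}`;
}
```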
---
## 🧩 Composants Réutilisables
### 1. `MissionsAdminPanel` (`components/missions/missions-admin-panel.tsx`)
**Fonctionnalités**:
- Formulaire multi-onglets
- Gestion des gardiens et volontaires
- Upload de fichiers
- Validation complète
- Soumission vers API
**Props**: Aucune (composant autonome)
**State**:
- `missionData` - Données de la mission
- `selectedServices` - Services sélectionnés
- `selectedProfils` - Profils sélectionnés
- `gardienDuTemps`, `gardienDeLaParole`, `gardienDeLaMemoire` - IDs des gardiens
- `volontaires` - Array d'IDs de volontaires
- `activeTab` - Onglet actif
- `isSubmitting` - État de soumission
### 2. `FileUpload` (`components/missions/file-upload.tsx`)
**Fonctionnalités**:
- Upload de logo ou attachment
- Conversion en base64
- Preview pour les images
**Props**:
- `type`: 'logo' | 'attachment'
- `isNewMission`: boolean
- `onFileSelect`: (fileData) => void
### 3. `AttachmentsList` (`components/missions/attachments-list.tsx`)
**Fonctionnalités**:
- Liste des attachments d'une mission
- Upload de nouveaux attachments
- Suppression d'attachments
**Props**:
- `missionId`: string
- `allowUpload`: boolean
- `allowDelete`: boolean
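Exemple d'utilisation (esquisse) de `FileUpload` et `AttachmentsList` avec les props documentées ci-dessus ; les chemins d'import et le JSX exact des pages réelles sont des suppositions :
```typescript
import { FileUpload } from '@/components/missions/file-upload'; // chemin supposé
import { AttachmentsList } from '@/components/missions/attachments-list'; // chemin supposé

export function MissionFilesSection({ missionId }: { missionId: string }) {
  return (
    <div className="space-y-4">
      {/* Upload du logo pour une mission existante */}
      <FileUpload
        type="logo"
        isNewMission={false}
        onFileSelect={(fileData) => console.log('Fichier sélectionné :', fileData)}
      />
      {/* Liste des attachments avec upload autorisé, suppression désactivée */}
      <AttachmentsList missionId={missionId} allowUpload allowDelete={false} />
    </div>
  );
}
```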
### 4. `MissionsFrame` (`components/missions/missions-frame.tsx`)
**Fonctionnalités**: Wrapper iframe (non utilisé actuellement)
---
## 🔐 Contrôles d'Accès
### Page Centrale (`/missions`)
- **Rôles requis**: `entrepreneurship` ou `admin`
- Vérifié dans `components/main-nav.tsx` via `hasRole()`
### API Routes
- **Authentification**: Session NextAuth requise
- **GET /api/missions**: Missions où user est créateur ou membre
- **GET /api/missions/all**: Toutes les missions (authentifié)
- **GET /api/missions/[id]**: Créateur ou membre
- **PUT /api/missions/[id]**: Créateur ou gardien-temps/gardien-parole
- **DELETE /api/missions/[id]**: Créateur ou admin uniquement
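Esquisse (hypothétique) de la règle d'accès en écriture décrite ci-dessus (créateur ou gardien-temps/gardien-parole) ; le code réel de la route PUT peut différer :
```typescript
// Vrai si l'utilisateur peut modifier la mission (PUT /api/missions/[id])
function canEditMission(
  mission: { creatorId: string; missionUsers: { userId: string; role: string }[] },
  userId: string
): boolean {
  if (mission.creatorId === userId) return true;
  return mission.missionUsers.some(
    (mu) => mu.userId === userId && ['gardien-temps', 'gardien-parole'].includes(mu.role)
  );
}
```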
---
## 📊 Flux de Données Complet
### Création de Mission
```
[Frontend]
MissionsAdminPanel
↓ (soumission)
POST /api/missions
[Backend]
1. Validation
2. prisma.mission.create()
3. prisma.missionUser.createMany()
4. uploadMissionLogo() → Minio
5. uploadMissionAttachment() → Minio (pour chaque attachment)
6. prisma.attachment.create() (pour chaque attachment)
7. verifyFileExists() (vérification Minio)
8. n8nService.triggerMissionCreation()
[N8N Workflow]
- Création Leantime
- Création Outline
- Création RocketChat
- Création Gitea
- Création Penpot
[Backend]
9. Retour succès/erreur
[Frontend]
Redirection → /missions
```
### Consultation de Mission
```
[Frontend]
MissionsPage ou MissionDetailPage
fetch('/api/missions') ou fetch('/api/missions/[id]')
[Backend]
1. Vérification session
2. Query Prisma avec relations
3. Transformation des paths en URLs publiques
4. Retour JSON
[Frontend]
Affichage des données
```
### Affichage d'Image
```
[Frontend]
<img src="/api/missions/image/missions/{id}/logo.png" />
[Backend]
GET /api/missions/image/[...path]
Lecture depuis Minio
Stream vers client
```
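Esquisse (hypothétique) de cette route de streaming ; l'export `s3Client` et la lecture du flux sont des suppositions, l'implémentation réelle de `app/api/missions/image/[...path]/route.ts` peut différer :
```typescript
import { NextResponse } from 'next/server';
import { GetObjectCommand } from '@aws-sdk/client-s3';
import { s3Client } from '@/lib/mission-uploads'; // export supposé du client Minio configuré

export async function GET(
  _request: Request,
  props: { params: Promise<{ path: string[] }> }
) {
  const { path } = await props.params;
  // Le bucket ne contient pas le préfixe "missions/", on le retire du chemin demandé
  const key = path.join('/').replace(/^missions\//, '');

  const object = await s3Client.send(
    new GetObjectCommand({ Bucket: 'missions', Key: key })
  );

  const body = await object.Body?.transformToByteArray();
  return new NextResponse(body, {
    headers: { 'Content-Type': object.ContentType ?? 'application/octet-stream' }
  });
}
```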
---
## 🎨 Styles et UI
### Couleurs des Badges Niveau
- **A (Apprentissage)**: `bg-green-100 text-green-800`
- **B (Basique)**: `bg-blue-100 text-blue-800`
- **C (Complexe)**: `bg-purple-100 text-purple-800`
- **S (Spécial)**: `bg-amber-100 text-amber-800`
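Esquisse (hypothétique) d'un helper centralisant ces classes ; les composants réels peuvent appliquer les classes directement :
```typescript
// Associe un niveau (A/B/C/S) à ses classes Tailwind de badge
const NIVEAU_BADGE_CLASSES: Record<string, string> = {
  A: 'bg-green-100 text-green-800',
  B: 'bg-blue-100 text-blue-800',
  C: 'bg-purple-100 text-purple-800',
  S: 'bg-amber-100 text-amber-800'
};

export function getNiveauBadgeClasses(niveau: string): string {
  // Les niveaux sont parfois stockés en minuscules ("a"), on normalise
  return NIVEAU_BADGE_CLASSES[niveau.toUpperCase()] ?? 'bg-gray-100 text-gray-800';
}
```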
### Layout Sidebar CAP
- **Fond**: `bg-pink-50`
- **Bordure**: `border-pink-100`
- **Largeur**: `234px` fixe
### Grille de Missions
- **Mobile**: 1 colonne
- **Tablet**: 2 colonnes (`md:grid-cols-2`)
- **Desktop**: 3 colonnes (`lg:grid-cols-3`)
---
## 🐛 Points d'Attention
1. **Credentials Minio hardcodés** dans `lib/mission-uploads.ts` - À déplacer vers variables d'environnement
2. **Rollback N8N non implémenté** lors de la suppression
3. **Skills tab non fonctionnel** - Placeholders uniquement
4. **Presigned URLs non implémentées** - Upload direct uniquement
5. **Gestion d'erreurs N8N** - Partielle (continue même si certaines intégrations échouent)
---
## 📝 Notes Techniques
### Types TypeScript
**Mission Interface** (utilisée dans les pages):
```typescript
interface Mission {
id: string;
name: string;
logo?: string;
logoUrl?: string;
oddScope: string[];
niveau: string;
missionType: string;
projection: string;
participation?: string;
services?: string[];
profils?: string[];
intention?: string;
donneurDOrdre?: string;
createdAt: string;
creator: User;
missionUsers: MissionUser[];
attachments?: Attachment[];
}
```
### Validation
**Côté Frontend**: Validation dans `MissionsAdminPanel.validateMission()`
**Côté Backend**: Validation minimale (name et oddScope requis)
### Gestion d'Erreurs
- **Frontend**: Toast notifications via `useToast()`
- **Backend**: Retour JSON avec `error` et `details`
- **N8N**: Retour avec `success` et `failedServices` pour erreurs partielles
---
## 🔄 Évolutions Possibles
1. **Pagination** côté client pour les listes
2. **Filtres avancés** (par niveau, type, ODD, etc.)
3. **Recherche full-text** dans l'intention
4. **Export** des missions (PDF, CSV)
5. **Notifications** lors de l'assignation à une mission
6. **Statistiques** des missions
7. **Timeline** des activités d'une mission
8. **Commentaires** sur les missions
9. **États** des missions (brouillon, publiée, terminée, etc.)
10. **Permissions granulaires** par rôle de gardien
---
**Document généré le**: $(date)
**Version**: 1.0
**Auteur**: Analyse automatique du codebase

View File

@ -1,982 +0,0 @@
# Workflow Détaillé : Création de Mission - Prisma, Minio et N8N
## 📋 Vue d'Ensemble
Ce document trace **chaque étape** du workflow de création de mission, depuis le formulaire frontend jusqu'aux intégrations externes via N8N, en passant par Prisma (base de données) et Minio (stockage de fichiers).
---
## 🔄 Flux Complet - Vue d'Ensemble
```
┌─────────────────────────────────────────────────────────────┐
│ 1. FRONTEND - MissionsAdminPanel │
│ - Validation des champs │
│ - Préparation des données (base64 pour fichiers) │
│ - POST /api/missions │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ 2. BACKEND - POST /api/missions │
│ ├─ Authentification (NextAuth) │
│ ├─ Validation des champs requis │
│ ├─ STEP 1: Prisma.mission.create() │
│ ├─ STEP 2: Prisma.missionUser.createMany() │
│ ├─ STEP 3: Upload Logo → Minio │
│ ├─ STEP 4: Upload Attachments → Minio │
│ ├─ STEP 5: Vérification fichiers Minio │
│ └─ STEP 6: N8N Workflow Trigger │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ 3. N8N WORKFLOW │
│ - Création Leantime Project │
│ - Création Outline Collection │
│ - Création RocketChat Channel │
│ - Création Gitea Repository │
│ - Création Penpot Project │
└─────────────────────────────────────────────────────────────┘
```
---
## 📝 ÉTAPE 1 : Frontend - Préparation des Données
### Fichier : `components/missions/missions-admin-panel.tsx`
### 1.1 Validation (`validateMission()`)
**Lignes 369-397**
```typescript
const validateMission = () => {
const requiredFields = {
name: !!missionData.name,
oddScope: Array.isArray(missionData.oddScope) && missionData.oddScope.length > 0,
niveau: !!missionData.niveau,
intention: !!missionData.intention,
missionType: !!missionData.missionType,
donneurDOrdre: !!missionData.donneurDOrdre,
projection: !!missionData.projection,
participation: !!missionData.participation,
gardiens: gardienDuTemps !== null &&
gardienDeLaParole !== null &&
gardienDeLaMemoire !== null
};
// Vérifie que tous les champs requis sont remplis
// Retourne false si un champ manque
}
```
**Champs requis** :
- ✅ `name` : Nom de la mission
- ✅ `oddScope` : Array avec au moins 1 ODD
- ✅ `niveau` : A/B/C/S
- ✅ `intention` : Description
- ✅ `missionType` : remote/onsite/hybrid
- ✅ `donneurDOrdre` : individual/group/organization
- ✅ `projection` : short/medium/long
- ✅ `participation` : volontaire/cooptation
- ✅ `gardiens` : Les 3 gardiens doivent être assignés
### 1.2 Préparation des Données (`handleSubmitMission()`)
**Lignes 400-460**
```typescript
const handleSubmitMission = async () => {
// 1. Validation
if (!validateMission()) return;
// 2. Préparation des gardiens
const guardians = {
"gardien-temps": gardienDuTemps,
"gardien-parole": gardienDeLaParole,
"gardien-memoire": gardienDeLaMemoire
};
// 3. Construction de l'objet de soumission
const missionSubmitData = {
...missionData, // name, oddScope, niveau, intention, etc.
services: selectedServices, // ["Gite", "ArtLab", "Calcul"]
profils: selectedProfils, // ["DataIntelligence", "Expression", ...]
guardians, // { "gardien-temps": userId, ... }
volunteers: volontaires, // [userId1, userId2, ...]
logo: missionData.logo // { data: "data:image/png;base64,...", name, type }
};
// 4. Envoi à l'API
const response = await fetch('/api/missions', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(missionSubmitData)
});
}
```
**Format du logo** (si présent) :
```typescript
logo: {
data: "data:image/png;base64,iVBORw0KGgoAAAANS...", // Base64 avec préfixe
name: "logo.png",
type: "image/png"
}
```
**Format des attachments** (si présents) :
```typescript
attachments: [
{
data: "data:application/pdf;base64,JVBERi0xLjQKJe...",
name: "document.pdf",
type: "application/pdf"
}
]
```
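Esquisse (hypothétique) de la conversion `File` → base64 côté frontend produisant ce format `{ data, name, type }` ; la fonction réelle du composant peut différer :
```typescript
// Convertit un File en payload base64 avec préfixe data URL
function fileToBase64Payload(file: File): Promise<{ data: string; name: string; type: string }> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () =>
      resolve({ data: reader.result as string, name: file.name, type: file.type });
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(file); // Produit "data:<mime>;base64,..."
  });
}
```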
---
## 🗄️ ÉTAPE 2 : Backend - POST /api/missions
### Fichier : `app/api/missions/route.ts`
### 2.1 Authentification et Validation
**Lignes 205-224**
```typescript
export async function POST(request: Request) {
// 1. Vérification de l'authentification
const { authorized, userId } = await checkAuth(request);
if (!authorized || !userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
}
// 2. Parsing du body
const body = await request.json();
// 3. Validation minimale (name et oddScope requis)
if (!body.name || !body.oddScope) {
return NextResponse.json({
error: 'Missing required fields',
missingFields: ['name', 'oddScope'].filter(field => !body[field])
}, { status: 400 });
}
}
```
### 2.2 STEP 1 : Création de la Mission en Base de Données
**Lignes 226-248**
```typescript
// Préparation des données pour Prisma
const missionData = {
name: body.name,
oddScope: body.oddScope, // Array de strings
niveau: body.niveau,
intention: body.intention,
missionType: body.missionType,
donneurDOrdre: body.donneurDOrdre,
projection: body.projection,
services: body.services, // Array de strings
profils: body.profils, // Array de strings
participation: body.participation,
creatorId: userId, // ID de l'utilisateur connecté
logo: null, // Sera mis à jour après upload
};
// Création en base de données
const mission = await prisma.mission.create({
data: missionData
});
// Résultat : mission.id est maintenant disponible
// Exemple : mission.id = "abc-123-def-456"
```
**Schéma Prisma** :
```prisma
model Mission {
id String @id @default(uuid())
name String
logo String? // null pour l'instant
oddScope String[]
niveau String
intention String
missionType String
donneurDOrdre String
projection String
services String[]
participation String?
profils String[]
creatorId String
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
// Relations
creator User @relation(...)
missionUsers MissionUser[]
attachments Attachment[]
}
```
**Points importants** :
- ✅ La mission est créée **AVANT** l'upload des fichiers
- ✅ Le `mission.id` est généré automatiquement (UUID)
- ✅ Le champ `logo` est `null` pour l'instant
- ✅ Tous les champs sont sauvegardés sauf les fichiers
### 2.3 STEP 2 : Création des MissionUsers (Gardiens + Volontaires)
**Lignes 250-283**
```typescript
// Préparation du tableau de MissionUsers
const missionUsers = [];
// 2.1 Ajout des gardiens
if (body.guardians) {
for (const [role, guardianId] of Object.entries(body.guardians)) {
if (guardianId) {
missionUsers.push({
missionId: mission.id, // ID de la mission créée
userId: guardianId, // ID de l'utilisateur gardien
role: role // "gardien-temps", "gardien-parole", "gardien-memoire"
});
}
}
}
// 2.2 Ajout des volontaires
if (body.volunteers && body.volunteers.length > 0) {
for (const volunteerId of body.volunteers) {
missionUsers.push({
missionId: mission.id,
userId: volunteerId,
role: 'volontaire'
});
}
}
// 2.3 Création en batch dans Prisma
if (missionUsers.length > 0) {
await prisma.missionUser.createMany({
data: missionUsers
});
}
```
**Schéma Prisma MissionUser** :
```prisma
model MissionUser {
id String @id @default(uuid())
role String // 'gardien-temps', 'gardien-parole', 'gardien-memoire', 'volontaire'
missionId String
userId String
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
mission Mission @relation(...)
user User @relation(...)
@@unique([missionId, userId, role]) // Un utilisateur ne peut avoir qu'un rôle par mission
}
```
**Exemple de données créées** :
```typescript
missionUsers = [
{ missionId: "abc-123", userId: "user-1", role: "gardien-temps" },
{ missionId: "abc-123", userId: "user-2", role: "gardien-parole" },
{ missionId: "abc-123", userId: "user-3", role: "gardien-memoire" },
{ missionId: "abc-123", userId: "user-4", role: "volontaire" },
{ missionId: "abc-123", userId: "user-5", role: "volontaire" }
]
```
**Points importants** :
- ✅ Utilisation de `createMany()` pour performance (1 requête au lieu de N)
- ✅ Contrainte unique : un utilisateur ne peut avoir qu'un rôle par mission
- ✅ Les gardiens sont obligatoires, les volontaires optionnels
### 2.4 STEP 3 : Upload du Logo vers Minio
**Lignes 285-310**
```typescript
let logoPath = null;
if (body.logo?.data) {
try {
// 3.1 Conversion base64 → Buffer → File
const base64Data = body.logo.data.split(',')[1]; // Retire "data:image/png;base64,"
const buffer = Buffer.from(base64Data, 'base64');
const file = new File(
[buffer],
body.logo.name || 'logo.png',
{ type: body.logo.type || 'image/png' }
);
// 3.2 Upload vers Minio
const { filePath } = await uploadMissionLogo(userId, mission.id, file);
logoPath = filePath; // Ex: "missions/abc-123/logo.png"
uploadedFiles.push({ type: 'logo', path: filePath });
// 3.3 Mise à jour de la mission avec le chemin du logo
await prisma.mission.update({
where: { id: mission.id },
data: { logo: filePath }
});
} catch (uploadError) {
throw new Error('Failed to upload logo');
}
}
```
**Fonction `uploadMissionLogo()` - `lib/mission-uploads.ts`** :
**Lignes 96-145**
```typescript
export async function uploadMissionLogo(
userId: string,
missionId: string,
file: File
): Promise<{ filePath: string }> {
// 1. Génération du chemin
const fileExtension = file.name.substring(file.name.lastIndexOf('.'));
const filePath = getMissionLogoPath(userId, missionId, fileExtension);
// Résultat : "missions/{missionId}/logo.png"
// 2. Conversion pour Minio (retire le préfixe "missions/")
const minioPath = filePath.replace(/^missions\//, '');
// Résultat : "{missionId}/logo.png"
// 3. Conversion File → Buffer
const arrayBuffer = await file.arrayBuffer();
const buffer = Buffer.from(arrayBuffer);
// 4. Upload vers Minio via S3 SDK
await s3Client.send(new PutObjectCommand({
Bucket: 'missions', // Bucket Minio
Key: minioPath, // "{missionId}/logo.png"
Body: buffer, // Contenu du fichier
ContentType: file.type, // "image/png"
ACL: 'public-read' // Accès public en lecture
}));
return { filePath }; // Retourne le chemin complet avec préfixe
}
```
**Configuration Minio** :
```typescript
// lib/mission-uploads.ts
const s3Client = new S3Client({
region: 'us-east-1',
endpoint: 'https://dome-api.slm-lab.net', // Endpoint Minio
credentials: {
accessKeyId: '4aBT4CMb7JIMMyUtp4Pl',
secretAccessKey: 'HGn39XhCIlqOjmDVzRK9MED2Fci2rYvDDgbLFElg'
},
forcePathStyle: true // Requis pour MinIO
});
```
**Structure dans Minio** :
```
Bucket: missions
└── {missionId}/
└── logo.png
```
**Points importants** :
- ✅ Le logo est uploadé **APRÈS** la création de la mission (pour avoir le missionId)
- ✅ Le chemin est mis à jour dans Prisma après l'upload
- ✅ Le fichier est stocké avec ACL `public-read` pour accès public
- ✅ Le chemin stocké inclut le préfixe `missions/` pour cohérence
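Esquisse (hypothétique) des helpers de chemins référencés dans ce workflow, `getMissionLogoPath()` et `getMissionAttachmentPath()`, cohérente avec la structure `missions/{missionId}/...` décrite plus haut ; les signatures documentées incluent `userId` même s'il n'apparaît pas dans le chemin actuel :
```typescript
// Chemin du logo : "missions/{missionId}/logo{extension}"
export function getMissionLogoPath(
  userId: string, // présent dans la signature documentée, non utilisé dans le chemin actuel
  missionId: string,
  fileExtension: string
): string {
  return `missions/${missionId}/logo${fileExtension}`;
}

// Chemin d'un attachment : "missions/{missionId}/attachments/{filename}"
export function getMissionAttachmentPath(
  userId: string,
  missionId: string,
  filename: string
): string {
  return `missions/${missionId}/attachments/${filename}`;
}
```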
### 2.5 STEP 4 : Upload des Attachments vers Minio
**Lignes 312-343**
```typescript
if (body.attachments && body.attachments.length > 0) {
try {
// 4.1 Traitement parallèle de tous les attachments
const attachmentPromises = body.attachments.map(async (attachment: any) => {
// Conversion base64 → Buffer → File
const base64Data = attachment.data.split(',')[1];
const buffer = Buffer.from(base64Data, 'base64');
const file = new File(
[buffer],
attachment.name || 'attachment',
{ type: attachment.type || 'application/octet-stream' }
);
// 4.2 Upload vers Minio
const { filePath, filename, fileType, fileSize } =
await uploadMissionAttachment(userId, mission.id, file);
uploadedFiles.push({ type: 'attachment', path: filePath });
// 4.3 Création de l'enregistrement Attachment en base
return prisma.attachment.create({
data: {
missionId: mission.id,
filename, // "document.pdf"
filePath, // "missions/abc-123/attachments/document.pdf"
fileType, // "application/pdf"
fileSize, // 123456 (bytes)
uploaderId: userId
}
});
});
// 4.4 Attente de tous les uploads (parallèle)
await Promise.all(attachmentPromises);
} catch (attachmentError) {
throw new Error('Failed to upload attachments');
}
}
```
**Fonction `uploadMissionAttachment()` - `lib/mission-uploads.ts`** :
**Lignes 148-210**
```typescript
export async function uploadMissionAttachment(
userId: string,
missionId: string,
file: File
): Promise<{
filename: string;
filePath: string;
fileType: string;
fileSize: number;
}> {
// 1. Génération du chemin
const filePath = getMissionAttachmentPath(userId, missionId, file.name);
// Résultat : "missions/{missionId}/attachments/{filename}"
// 2. Conversion pour Minio
const minioPath = filePath.replace(/^missions\//, '');
// Résultat : "{missionId}/attachments/{filename}"
// 3. Conversion File → Buffer
const arrayBuffer = await file.arrayBuffer();
const buffer = Buffer.from(arrayBuffer);
// 4. Upload vers Minio
await s3Client.send(new PutObjectCommand({
Bucket: 'missions',
Key: minioPath,
Body: buffer,
ContentType: file.type,
ACL: 'public-read'
}));
return {
filename: file.name,
filePath, // Chemin complet avec préfixe
fileType: file.type,
fileSize: file.size
};
}
```
**Schéma Prisma Attachment** :
```prisma
model Attachment {
id String @id @default(uuid())
filename String // "document.pdf"
filePath String // "missions/{missionId}/attachments/{filename}"
fileType String // "application/pdf"
fileSize Int // 123456
missionId String
uploaderId String
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
mission Mission @relation(...)
uploader User @relation(...)
}
```
**Structure dans Minio** :
```
Bucket: missions
└── {missionId}/
├── logo.png
└── attachments/
├── document1.pdf
├── document2.docx
└── image.jpg
```
**Points importants** :
- ✅ Uploads en **parallèle** avec `Promise.all()` pour performance
- ✅ Chaque attachment crée un enregistrement Prisma séparé
- ✅ Le `uploaderId` est l'utilisateur qui a créé la mission
- ✅ Les fichiers sont stockés avec ACL `public-read`
### 2.6 STEP 5 : Vérification des Fichiers dans Minio
**Lignes 345-365**
```typescript
// 5.1 Vérification du logo
if (logoPath) {
const logoExists = await verifyFileExists(logoPath);
if (!logoExists) {
throw new Error('Logo file not found in Minio');
}
}
// 5.2 Vérification des attachments
if (body.attachments?.length > 0) {
const attachmentVerifications = uploadedFiles
.filter(f => f.type === 'attachment')
.map(f => verifyFileExists(f.path));
const attachmentResults = await Promise.all(attachmentVerifications);
if (attachmentResults.some(exists => !exists)) {
throw new Error('One or more attachment files not found in Minio');
}
}
```
**Fonction `verifyFileExists()` - Lignes 191-202**
```typescript
async function verifyFileExists(filePath: string): Promise<boolean> {
try {
await s3Client.send(new HeadObjectCommand({
Bucket: 'missions',
Key: filePath.replace('missions/', '') // Retire le préfixe
}));
return true; // Fichier existe
} catch (error) {
return false; // Fichier n'existe pas
}
}
```
**Points importants** :
- ✅ Vérification **AVANT** de déclencher N8N
- ✅ Utilise `HeadObjectCommand` (légère, ne télécharge pas le fichier)
- ✅ Si un fichier manque, le workflow s'arrête avec erreur
### 2.7 STEP 6 : Déclenchement du Workflow N8N
**Lignes 367-393**
```typescript
// 6.1 Préparation des données pour N8N
const n8nData = {
...body, // Toutes les données de la mission
creatorId: userId, // ID du créateur
logoPath: logoPath, // Chemin du logo (ou null)
config: {
N8N_API_KEY: process.env.N8N_API_KEY,
MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL
}
};
// 6.2 Déclenchement du workflow
const n8nService = new N8nService();
const workflowResult = await n8nService.triggerMissionCreation(n8nData);
// 6.3 Vérification du résultat
if (!workflowResult.success) {
throw new Error(workflowResult.error || 'N8N workflow failed');
}
// 6.4 Retour succès
return NextResponse.json({
success: true,
mission,
message: 'Mission created successfully with all integrations'
});
```
---
## 🔗 ÉTAPE 3 : Service N8N
### Fichier : `lib/services/n8n-service.ts`
### 3.1 Configuration
**Lignes 3-17**
```typescript
export class N8nService {
private webhookUrl: string;
private rollbackWebhookUrl: string;
private apiKey: string;
constructor() {
this.webhookUrl = process.env.N8N_WEBHOOK_URL ||
'https://brain.slm-lab.net/webhook/mission-created';
this.rollbackWebhookUrl = process.env.N8N_ROLLBACK_WEBHOOK_URL ||
'https://brain.slm-lab.net/webhook/mission-rollback';
this.apiKey = process.env.N8N_API_KEY || '';
}
}
```
### 3.2 Nettoyage et Validation des Données
**Lignes 19-49**
```typescript
async triggerMissionCreation(data: any): Promise<any> {
// Nettoyage des données
const cleanData = {
name: data.name,
oddScope: Array.isArray(data.oddScope) ? data.oddScope : [data.oddScope],
niveau: data.niveau || 'default',
intention: data.intention?.trim() || '',
missionType: data.missionType || 'default',
donneurDOrdre: data.donneurDOrdre || 'default',
projection: data.projection || 'default',
services: Array.isArray(data.services) ? data.services : [],
participation: data.participation || 'default',
profils: Array.isArray(data.profils) ? data.profils : [],
guardians: data.guardians || {},
volunteers: Array.isArray(data.volunteers) ? data.volunteers : [],
creatorId: data.creatorId,
config: {
...data.config,
N8N_API_KEY: this.apiKey,
MISSION_API_URL: process.env.NEXT_PUBLIC_API_URL || 'https://api.slm-lab.net/api'
}
};
}
```
**Points importants** :
- ✅ Normalisation des arrays (assure qu'ils sont bien des arrays)
- ✅ Valeurs par défaut pour éviter les undefined
- ✅ Trim de l'intention pour retirer les espaces
- ✅ Conservation de la config avec les clés API
### 3.3 Envoi au Webhook N8N
**Lignes 73-96**
```typescript
const response = await fetch(this.webhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'x-api-key': this.apiKey // Authentification
},
body: JSON.stringify(cleanData)
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`HTTP error! status: ${response.status}, body: ${errorText}`);
}
```
### 3.4 Traitement de la Réponse
**Lignes 98-143**
```typescript
const responseText = await response.text();
try {
const result = JSON.parse(responseText);
// Détection d'erreurs partielles
if (result.error || result.message?.includes('failed')) {
const errorMessage = result.message || result.error;
const failedServices = {
gitRepo: errorMessage.includes('Git repository creation failed'),
leantimeProject: errorMessage.includes('Leantime project creation failed'),
docCollection: errorMessage.includes('Documentation collection creation failed'),
rocketChatChannel: errorMessage.includes('RocketChat channel creation failed')
};
// Retourne succès avec erreurs partielles
return {
success: true,
results: { ...result, failedServices }
};
}
return { success: true, results: result };
} catch (parseError) {
// Si la réponse n'est pas JSON, considère comme succès
return {
success: true,
results: {
logoUrl: null,
leantimeProjectId: null,
outlineCollectionId: null,
rocketChatChannelId: null,
giteaRepositoryUrl: null
}
};
}
```
**Format de réponse attendu de N8N** :
```json
{
"leantimeProjectId": "project-123",
"outlineCollectionId": "collection-456",
"rocketChatChannelId": "channel-789",
"giteaRepositoryUrl": "https://git.slm-lab.net/mission-abc",
"penpotProjectId": "penpot-xyz"
}
```
**Intégrations créées par N8N** :
1. **Leantime** : Projet de gestion de projet
2. **Outline** : Collection de documentation
3. **RocketChat** : Canal de communication
4. **Gitea** : Repository Git
5. **Penpot** : Projet de design
**Points importants** :
- ✅ Gestion des erreurs partielles (certains services peuvent échouer)
- ✅ Si la réponse n'est pas JSON, considère comme succès (workflow déclenché)
- ✅ Les IDs retournés ne sont **PAS** sauvegardés en base (TODO)
---
## 🧹 Gestion des Erreurs et Cleanup
### Fichier : `app/api/missions/route.ts` - Lignes 398-418
```typescript
catch (error) {
console.error('Error in mission creation:', error);
// Cleanup: Suppression de tous les fichiers uploadés
for (const file of uploadedFiles) {
try {
await s3Client.send(new DeleteObjectCommand({
Bucket: 'missions',
Key: file.path.replace('missions/', '')
}));
console.log('Cleaned up file:', file.path);
} catch (cleanupError) {
console.error('Error cleaning up file:', file.path, cleanupError);
}
}
return NextResponse.json({
error: 'Failed to create mission',
details: error instanceof Error ? error.message : String(error)
}, { status: 500 });
}
```
**Scénarios de cleanup** :
1. ✅ Erreur lors de l'upload du logo → Suppression du logo
2. ✅ Erreur lors de l'upload d'un attachment → Suppression de tous les fichiers
3. ✅ Erreur lors de la vérification Minio → Suppression de tous les fichiers
4. ✅ Erreur N8N → Suppression de tous les fichiers
**Points importants** :
- ✅ La mission reste en base même en cas d'erreur (orphan)
- ✅ Les MissionUsers restent en base même en cas d'erreur
- ✅ Seuls les fichiers Minio sont nettoyés
- ⚠️ **TODO** : Rollback complet (suppression mission + users si erreur)
---
## 📊 Résumé des Opérations Prisma
### Requêtes Prisma dans l'ordre d'exécution :
1. **`prisma.mission.create()`**
- Crée la mission avec tous les champs
- Génère un UUID pour `id`
- `logo` = `null` initialement
2. **`prisma.missionUser.createMany()`**
- Crée tous les gardiens et volontaires en une requête
- Utilise `createMany()` pour performance
3. **`prisma.mission.update()`** (si logo)
- Met à jour le champ `logo` avec le chemin Minio
4. **`prisma.attachment.create()`** (pour chaque attachment)
- Créé en parallèle avec `Promise.all()`
- Un enregistrement par fichier
**Total** : 1 + 1 + (0 ou 1) + N requêtes Prisma
- Minimum : 2 requêtes (mission + users)
- Avec logo : 3 requêtes
- Avec logo et N attachments : 3 + N requêtes
---
## 📦 Résumé des Opérations Minio
### Uploads Minio dans l'ordre d'exécution :
1. **Logo** (si présent)
- Bucket : `missions`
- Key : `{missionId}/logo.png`
- Path stocké : `missions/{missionId}/logo.png`
2. **Attachments** (si présents, en parallèle)
- Bucket : `missions`
- Key : `{missionId}/attachments/{filename}`
- Path stocké : `missions/{missionId}/attachments/{filename}`
3. **Vérifications** (après uploads)
- `HeadObjectCommand` pour chaque fichier
- Vérifie l'existence avant N8N
**Total** : 1 + N uploads + (1 + N) vérifications
---
## 🔄 Résumé du Workflow N8N
### Données envoyées à N8N :
```typescript
{
name: "Mission Example",
oddScope: ["odd-3"],
niveau: "a",
intention: "Description...",
missionType: "remote",
donneurDOrdre: "individual",
projection: "short",
services: ["Gite", "ArtLab"],
participation: "volontaire",
profils: ["DataIntelligence", "Expression"],
guardians: {
"gardien-temps": "user-1",
"gardien-parole": "user-2",
"gardien-memoire": "user-3"
},
volunteers: ["user-4", "user-5"],
creatorId: "user-creator",
logoPath: "missions/abc-123/logo.png",
config: {
N8N_API_KEY: "...",
MISSION_API_URL: "https://api.slm-lab.net/api"
}
}
```
### Actions N8N (workflow interne) :
1. Création projet Leantime
2. Création collection Outline
3. Création canal RocketChat
4. Création repository Gitea
5. Création projet Penpot
**Note** : Les IDs retournés ne sont **PAS** sauvegardés en base actuellement.
---
## ⚠️ Points d'Attention et TODOs
### 1. Sauvegarde des IDs N8N
**Problème** : Les IDs retournés par N8N (leantimeProjectId, etc.) ne sont pas sauvegardés en base.
**Solution proposée** :
```typescript
// Après le workflow N8N
if (workflowResult.success && workflowResult.results) {
await prisma.mission.update({
where: { id: mission.id },
data: {
leantimeProjectId: workflowResult.results.leantimeProjectId,
outlineCollectionId: workflowResult.results.outlineCollectionId,
rocketChatChannelId: workflowResult.results.rocketChatChannelId,
giteaRepositoryUrl: workflowResult.results.giteaRepositoryUrl,
penpotProjectId: workflowResult.results.penpotProjectId
}
});
}
```
### 2. Rollback Complet
**Problème** : En cas d'erreur, la mission reste en base (orphan).
**Solution proposée** :
```typescript
catch (error) {
// Suppression de la mission et des relations
await prisma.mission.delete({ where: { id: mission.id } });
// Les MissionUsers et Attachments seront supprimés en cascade
// Nettoyage Minio...
}
```
### 3. Credentials Minio Hardcodés
**Problème** : Les credentials Minio sont hardcodés dans `lib/mission-uploads.ts`.
**Solution** : Déplacer vers variables d'environnement.
### 4. Gestion des Erreurs N8N Partielles
**Problème** : Si certains services N8N échouent, on continue quand même.
**Solution** : Décider si on continue ou on rollback selon la criticité.
---
## 📈 Performance et Optimisations
### Optimisations actuelles :
- ✅ `createMany()` pour MissionUsers (1 requête au lieu de N)
- ✅ `Promise.all()` pour les attachments (parallèle)
- ✅ `HeadObjectCommand` pour vérification (léger)
### Optimisations possibles :
- 🔄 Transaction Prisma pour atomicité
- 🔄 Batch upload Minio (si supporté)
- 🔄 Retry logic pour N8N
- 🔄 Cache des vérifications Minio
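Pour la première piste (transaction Prisma), une esquisse (hypothétique) couvrant les étapes 1 et 2 (mission + MissionUsers) ; `missionData` et `missionUsers` sont les variables des étapes précédentes, les uploads Minio et l'appel N8N resteraient hors transaction :
```typescript
// Transaction interactive : si createMany échoue, la mission n'est pas créée
const mission = await prisma.$transaction(async (tx) => {
  const created = await tx.mission.create({ data: missionData });

  if (missionUsers.length > 0) {
    await tx.missionUser.createMany({
      data: missionUsers.map((mu) => ({ ...mu, missionId: created.id }))
    });
  }

  return created;
});
```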
---
## 🔍 Debugging et Logs
### Points de logging importants :
1. **Début du workflow** : `=== Mission Creation Started ===`
2. **Création Prisma** : `Mission created successfully`
3. **Upload Minio** : `Logo upload successful` / `Attachment upload successful`
4. **Vérification** : `verifyFileExists()` logs
5. **N8N** : `=== Starting N8N Workflow ===` / `N8N Workflow Result`
6. **Erreurs** : Tous les catch blocks loggent les erreurs
### Commandes de debug :
```bash
# Voir les logs du serveur
npm run dev # Logs dans la console
# Vérifier Minio
# Accéder à https://dome-api.slm-lab.net
# Bucket: missions
# Vérifier Prisma
npx prisma studio # Interface graphique
```
---
**Document généré le** : $(date)
**Version** : 1.0
**Auteur** : Analyse complète du codebase

View File

@ -1,377 +0,0 @@
# Analyse : IDs Vides lors de la Suppression de Mission
## 🔍 Problème
Lors de la suppression d'une mission, N8N reçoit des IDs vides :
```json
{
"repoName": "",
"leantimeProjectId": 0,
"documentationCollectionId": "",
"rocketchatChannelId": "",
"giteaRepositoryUrl": null,
"outlineCollectionId": null,
"rocketChatChannelId": null
}
```
**Cela signifie que les IDs ne sont PAS sauvegardés en base lors de la création.**
---
## 🔄 Flow de Création (Théorique)
```
1. POST /api/missions
2. Crée mission en Prisma (sans IDs)
3. Upload logo dans Minio
4. POST N8N webhook (mission-created)
5. N8N crée intégrations :
- Gitea → retourne html_url
- Leantime → retourne projectId
- Outline → retourne collectionId
- RocketChat → retourne channelId
6. N8N → POST /mission-created (avec les IDs)
7. Backend sauvegarde les IDs en base ✅
```
---
## ❌ Problèmes Possibles
### Problème 1: N8N n'appelle pas `/mission-created`
**Symptôme** : Les IDs ne sont jamais sauvegardés
**Vérification** :
- Vérifier les logs N8N : le node "Save Mission To API" est-il exécuté ?
- Vérifier les logs backend : y a-t-il des appels à `/mission-created` ?
- Vérifier les erreurs N8N : le workflow s'arrête-t-il avant "Save Mission To API" ?
**Solution** :
- Vérifier que le node "Save Mission To API" est bien connecté dans le workflow
- Vérifier que l'URL est correcte : `{{ MISSION_API_URL }}/mission-created`
- Vérifier que l'API key est correcte dans les headers
---
### Problème 2: N8N appelle `/mission-created` mais sans `missionId`
**Symptôme** : L'endpoint ne trouve pas la mission
**Vérification** :
- Vérifier les logs backend :
```
=== Mission Created Webhook Received ===
Looking up mission by ID: ...
Mission not found: ...
```
- Vérifier le body reçu par `/mission-created` : contient-il `missionId` ?
**Solution** :
- Modifier le workflow N8N pour inclure `missionId` dans le body :
```json
{
"missionId": "={{ $node['Process Mission Data'].json.missionId }}",
...
}
```
---
### Problème 3: N8N appelle `/mission-created` mais les IDs sont vides
**Symptôme** : L'endpoint trouve la mission mais les IDs sont `null` ou vides
**Vérification** :
- Vérifier les logs backend :
```
Received mission-created data: {
missionId: "...",
gitRepoUrl: null, // ❌ Vide
leantimeProjectId: null, // ❌ Vide
...
}
```
- Vérifier les logs N8N : les nodes de création retournent-ils bien les IDs ?
**Solution** :
- Vérifier que les nodes N8N (Create Git Repository, Create Leantime Project, etc.) retournent bien les IDs
- Vérifier que le node "Combine Results" combine correctement les IDs
- Vérifier que le node "Save Mission To API" utilise les bons chemins pour les IDs
---
### Problème 4: Mapping incorrect des champs
**Symptôme** : Les IDs sont envoyés mais avec des noms incorrects
**Vérification** :
- Vérifier le body envoyé par N8N :
```json
{
"gitRepoUrl": "...", // ✅ Correct
"leantimeProjectId": "...", // ✅ Correct
"documentationCollectionId": "...", // ✅ Correct
"rocketchatChannelId": "..." // ✅ Correct
}
```
- Vérifier le mapping dans `/mission-created` :
- `gitRepoUrl` → `giteaRepositoryUrl`
- `documentationCollectionId` → `outlineCollectionId`
- `rocketchatChannelId` → `rocketChatChannelId`
**Solution** :
- Vérifier que les noms de champs correspondent exactement
---
### Problème 5: API Key incorrecte
**Symptôme** : L'endpoint retourne 401 Unauthorized
**Vérification** :
- Vérifier les logs backend :
```
Invalid API key: { received: '...', expected: '...' }
```
- Vérifier que `N8N_API_KEY` est bien configuré dans l'environnement
- Vérifier que N8N envoie bien `x-api-key` dans les headers
**Solution** :
- Vérifier la variable d'environnement `N8N_API_KEY`
- Vérifier que N8N utilise la bonne API key dans le header
---
## 🔍 Points de Vérification
### 1. Vérifier les Logs Backend
**Lors de la création** :
```
=== Starting N8N Workflow ===
Sending to N8N: { missionId: "...", ... }
N8N Workflow Result: { success: true, ... }
```
**Lors de l'appel `/mission-created`** :
```
=== Mission Created Webhook Received ===
Received mission-created data: { ... }
Looking up mission by ID: ...
Found mission: { id: "...", ... }
Mission updated successfully: { ... }
```
**Si ces logs n'apparaissent pas** → N8N n'appelle pas `/mission-created`
---
### 2. Vérifier les Logs N8N
**Dans le workflow N8N** :
- Le node "Save Mission To API" est-il exécuté ?
- Y a-t-il des erreurs dans ce node ?
- Le body envoyé contient-il les IDs ?
**Vérifier le body du node "Save Mission To API"** :
```json
{
"missionId": "={{ $node['Process Mission Data'].json.missionId }}",
"gitRepoUrl": "={{ $node['Combine Results'].json.gitRepo?.html_url }}",
"leantimeProjectId": "={{ $node['Combine Results'].json.leantimeProject?.result?.[0] }}",
"documentationCollectionId": "={{ $node['Combine Results'].json.docCollection?.data?.id }}",
"rocketchatChannelId": "={{ $node['Combine Results'].json.rocketChatChannel?.channel?._id }}"
}
```
---
### 3. Vérifier la Base de Données
**Requête SQL** :
```sql
SELECT
  id,
  name,
  "giteaRepositoryUrl",
  "leantimeProjectId",
  "outlineCollectionId",
  "rocketChatChannelId",
  "createdAt",
  "updatedAt"
FROM "Mission"
WHERE name = 'Creation'
ORDER BY "createdAt" DESC
LIMIT 1;
```
**Résultat attendu** :
```
id: cd0225cf-8dfd-4bf0-a20a-6aa9c04ebb42
name: Creation
giteaRepositoryUrl: https://gite.slm-lab.net/alma/creation ✅
leantimeProjectId: 123 ✅
outlineCollectionId: collection-456 ✅
rocketChatChannelId: channel-789 ✅
```
**Si tous les IDs sont `null`** → Ils ne sont pas sauvegardés
---
## 📋 Checklist de Diagnostic
### Étape 1: Vérifier que N8N reçoit missionId
- [ ] Les logs backend montrent `missionId` dans `n8nData`
- [ ] N8N reçoit bien `missionId` dans le webhook
### Étape 2: Vérifier que N8N crée les intégrations
- [ ] Les logs N8N montrent que les nodes de création sont exécutés
- [ ] Les nodes retournent bien les IDs (html_url, projectId, etc.)
### Étape 3: Vérifier que N8N combine les résultats
- [ ] Le node "Combine Results" contient les IDs
- [ ] Les IDs sont accessibles via les chemins corrects
### Étape 4: Vérifier que N8N appelle `/mission-created`
- [ ] Les logs backend montrent des appels à `/mission-created`
- [ ] Le node "Save Mission To API" est exécuté dans N8N
- [ ] Pas d'erreur 401 (API key) ou 404 (mission not found)
### Étape 5: Vérifier que les IDs sont sauvegardés
- [ ] Les logs backend montrent "Mission updated successfully"
- [ ] La base de données contient les IDs après création
- [ ] Les IDs sont correctement mappés
---
## 🎯 Actions Recommandées (Sans Toucher au Code)
### 1. Vérifier les Logs Backend
```bash
# Chercher les appels à /mission-created
grep "Mission Created Webhook Received" logs.txt
# Chercher les erreurs
grep "Mission not found" logs.txt
grep "Invalid API key" logs.txt
```
### 2. Vérifier le Workflow N8N
1. Ouvrir le workflow `NeahMissionCreate`
2. Vérifier que le node "Save Mission To API" :
- Est bien connecté après "Combine Results"
- Contient `missionId` dans le body
- Utilise les bons chemins pour les IDs
- A l'URL correcte : `{{ MISSION_API_URL }}/mission-created`
- A l'API key correcte dans les headers
### 3. Tester Manuellement
**Appel manuel à `/mission-created`** :
```bash
curl -X POST https://hub.slm-lab.net/api/missions/mission-created \
-H "Content-Type: application/json" \
-H "x-api-key: YOUR_API_KEY" \
-d '{
"missionId": "cd0225cf-8dfd-4bf0-a20a-6aa9c04ebb42",
"name": "Creation",
"creatorId": "user-id",
"gitRepoUrl": "https://gite.slm-lab.net/alma/creation",
"leantimeProjectId": "123",
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789"
}'
```
**Vérifier la réponse** :
- 200 OK → L'endpoint fonctionne
- 401 Unauthorized → Problème d'API key
- 404 Not Found → Problème de recherche de mission
- 400 Bad Request → Problème de validation
### 4. Vérifier la Base de Données
```sql
-- Vérifier une mission spécifique
SELECT * FROM "Mission" WHERE id = 'cd0225cf-8dfd-4bf0-a20a-6aa9c04ebb42';
-- Vérifier les missions récentes sans IDs
SELECT id, name, "createdAt"
FROM "Mission"
WHERE "giteaRepositoryUrl" IS NULL
  AND "leantimeProjectId" IS NULL
  AND "outlineCollectionId" IS NULL
  AND "rocketChatChannelId" IS NULL
ORDER BY "createdAt" DESC
LIMIT 10;
```
---
## 🔧 Solutions Probables
### Solution 1: Ajouter missionId dans N8N
**Dans le node "Save Mission To API"** :
```json
{
"missionId": "={{ $node['Process Mission Data'].json.missionId }}",
...
}
```
### Solution 2: Vérifier les Chemins des IDs dans N8N
**Vérifier que les chemins sont corrects** :
- `gitRepo.html_url` (pas `gitRepo.body.html_url`)
- `leantimeProject.result[0]` (array avec index 0)
- `docCollection.data.id` (pas `docCollection.id`)
- `rocketChatChannel.channel._id` (pas `rocketChatChannel._id`)
### Solution 3: Vérifier l'API Key
**Vérifier que** :
- `N8N_API_KEY` est configuré dans `.env`
- N8N utilise la même clé dans le header `x-api-key`
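Pour référence, une esquisse (hypothétique) de la vérification attendue côté endpoint ; le code réel de `/mission-created` peut différer :
```typescript
// Vérification de l'API key envoyée par N8N dans le header x-api-key
const apiKey = request.headers.get('x-api-key');
if (!apiKey || apiKey !== process.env.N8N_API_KEY) {
  return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
}
```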
---
## 📝 Résumé
**Le problème est que les IDs ne sont pas sauvegardés en base.**
**Causes possibles** :
1. ❌ N8N n'appelle pas `/mission-created`
2. ❌ N8N appelle mais sans `missionId` → mission non trouvée
3. ❌ N8N appelle mais les IDs sont vides dans le body
4. ❌ API key incorrecte → 401 Unauthorized
5. ❌ Mapping incorrect des champs
**Action immédiate** :
1. Vérifier les logs backend pour voir si `/mission-created` est appelé
2. Vérifier le workflow N8N pour voir si `missionId` est inclus
3. Vérifier la base de données pour voir si les IDs sont sauvegardés
---
**Date**: $(date)
**Status**: Analyse sans modification de code
**Action Requise**: Vérification des logs et du workflow N8N

View File

@ -1,625 +0,0 @@
# Workflow Détaillé : Suppression de Mission - Centrale (/missions)
## 📋 Vue d'Ensemble
Ce document trace **chaque étape** du workflow de suppression d'une mission depuis la page Centrale (`/missions/[missionId]`), incluant les vérifications de permissions, la suppression des fichiers Minio, et la suppression en base de données.
---
## 🔄 Flux Complet - Vue d'Ensemble
```
┌─────────────────────────────────────────────────────────────┐
│ 1. FRONTEND - MissionDetailPage │
│ - Clic sur bouton "Supprimer" │
│ - Confirmation utilisateur (confirm dialog) │
│ - DELETE /api/missions/[missionId] │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ 2. BACKEND - DELETE /api/missions/[missionId] │
│ ├─ Authentification (NextAuth) │
│ ├─ Vérification mission existe │
│ ├─ Vérification permissions (créateur ou admin) │
│ ├─ STEP 1: Suppression logo Minio (si existe) │
│ ├─ STEP 2: Rollback N8N (TODO - non implémenté) │
│ └─ STEP 3: Suppression mission Prisma (CASCADE) │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ 3. PRISMA CASCADE │
│ - Suppression automatique MissionUsers │
│ - Suppression automatique Attachments │
│ ⚠️ Fichiers Minio des attachments NON supprimés │
└─────────────────────────────────────────────────────────────┘
```
---
## 📝 ÉTAPE 1 : Frontend - Clic sur Supprimer
### Fichier : `app/missions/[missionId]/page.tsx`
### 1.1 Bouton Supprimer
**Lignes 397-411**
```typescript
<Button
variant="outline"
className="flex items-center gap-2 border-red-600 text-red-600 hover:bg-red-50 bg-white"
onClick={handleDeleteMission}
disabled={deleting}
>
{deleting ? (
<div className="animate-spin rounded-full h-4 w-4 border-t-2 border-b-2 border-red-600"></div>
) : (
<Trash2 className="h-4 w-4" />
)}
Supprimer
</Button>
```
**Caractéristiques** :
- ✅ Bouton rouge avec icône Trash2
- ✅ Désactivé pendant la suppression (`deleting` state)
- ✅ Affiche un spinner pendant le traitement
### 1.2 Handler de Suppression
**Lignes 143-176**
```typescript
const handleDeleteMission = async () => {
// 1. Confirmation utilisateur
if (!confirm("Êtes-vous sûr de vouloir supprimer cette mission ? Cette action est irréversible.")) {
return; // Annulation si l'utilisateur refuse
}
try {
setDeleting(true); // Active le spinner
// 2. Appel API DELETE
const response = await fetch(`/api/missions/${missionId}`, {
method: 'DELETE',
});
// 3. Vérification de la réponse
if (!response.ok) {
throw new Error('Failed to delete mission');
}
// 4. Notification de succès
toast({
title: "Mission supprimée",
description: "La mission a été supprimée avec succès",
});
// 5. Redirection vers la liste des missions
router.push('/missions');
} catch (error) {
console.error('Error deleting mission:', error);
toast({
title: "Erreur",
description: "Impossible de supprimer la mission",
variant: "destructive",
});
} finally {
setDeleting(false); // Désactive le spinner
}
};
```
**Points importants** :
- ✅ **Double confirmation** : Dialog natif du navigateur
- ✅ **Gestion d'état** : `deleting` pour le spinner
- ✅ **Redirection automatique** vers `/missions` en cas de succès
- ✅ **Gestion d'erreurs** avec toast notification
---
## 🗄️ ÉTAPE 2 : Backend - DELETE /api/missions/[missionId]
### Fichier : `app/api/missions/[missionId]/route.ts`
### 2.1 Authentification et Récupération de la Mission
**Lignes 292-316**
```typescript
export async function DELETE(
request: Request,
props: { params: Promise<{ missionId: string }> }
) {
const params = await props.params;
try {
// 1. Vérification de l'authentification
const session = await getServerSession(authOptions);
if (!session?.user) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
}
// 2. Récupération de la mission avec relations
const mission = await prisma.mission.findUnique({
where: { id: params.missionId },
include: {
missionUsers: {
include: {
user: true // Inclut les infos des utilisateurs
}
}
}
});
// 3. Vérification que la mission existe
if (!mission) {
return NextResponse.json({ error: 'Mission not found' }, { status: 404 });
}
```
**Points importants** :
- ✅ Vérification de session NextAuth
- ✅ Récupération de la mission avec `missionUsers` (pour logging/info)
- ✅ Retour 404 si mission n'existe pas
### 2.2 Vérification des Permissions
**Lignes 318-324**
```typescript
// Vérification : utilisateur doit être créateur OU admin
const isCreator = mission.creatorId === session.user.id;
const userRoles = Array.isArray(session.user.role) ? session.user.role : [];
const isAdmin = userRoles.includes('admin') || userRoles.includes('ADMIN');
if (!isCreator && !isAdmin) {
return NextResponse.json({ error: 'Forbidden' }, { status: 403 });
}
```
**Règles de permissions** :
- ✅ **Créateur** : Peut supprimer sa propre mission
- ✅ **Admin** : Peut supprimer n'importe quelle mission
- ❌ **Autres utilisateurs** : Même gardiens/volontaires ne peuvent pas supprimer
**Points importants** :
- ✅ Vérification stricte : seul le créateur ou un admin peut supprimer
- ✅ Les gardiens (même gardien-memoire) ne peuvent pas supprimer
- ✅ Retour 403 Forbidden si pas autorisé
### 2.3 STEP 1 : Suppression du Logo dans Minio
**Lignes 326-334**
```typescript
// Suppression du logo si présent
if (mission.logo) {
try {
await deleteMissionLogo(params.missionId, mission.logo);
} catch (error) {
console.error('Error deleting mission logo:', error);
// Continue deletion even if logo deletion fails
}
}
```
**Fonction `deleteMissionLogo()` - `lib/mission-uploads.ts`** :
**Lignes 42-61**
```typescript
export async function deleteMissionLogo(
missionId: string,
logoPath: string
): Promise<void> {
try {
const normalizedPath = ensureMissionsPrefix(logoPath);
// ⚠️ TODO: La fonction ne fait que logger, ne supprime pas vraiment !
console.log('Deleting mission logo:', {
missionId,
originalPath: logoPath,
normalizedPath
});
// TODO: Implémenter la suppression réelle avec DeleteObjectCommand
} catch (error) {
console.error('Error deleting mission logo:', error);
throw error;
}
}
```
**⚠️ PROBLÈME IDENTIFIÉ** :
- ❌ La fonction `deleteMissionLogo()` ne supprime **PAS** réellement le fichier
- ❌ Elle ne fait que logger les informations
- ⚠️ Le logo reste dans Minio après suppression de la mission
**Solution proposée** :
```typescript
export async function deleteMissionLogo(
missionId: string,
logoPath: string
): Promise<void> {
try {
const { DeleteObjectCommand } = await import('@aws-sdk/client-s3');
const normalizedPath = ensureMissionsPrefix(logoPath);
const minioPath = normalizedPath.replace(/^missions\//, '');
await s3Client.send(new DeleteObjectCommand({
Bucket: 'missions',
Key: minioPath
}));
console.log('Mission logo deleted successfully:', minioPath);
} catch (error) {
console.error('Error deleting mission logo:', error);
throw error;
}
}
```
**Points importants** :
- ✅ Continue la suppression même si le logo échoue (try/catch)
- ⚠️ **BUG** : Le logo n'est pas réellement supprimé actuellement
### 2.4 STEP 2 : Rollback N8N (TODO - Non Implémenté)
**Lignes 336-344**
```typescript
// Trigger n8n workflow for rollback
// TODO: Implement rollbackMission method in N8nService
// const n8nService = new N8nService();
// try {
// await n8nService.rollbackMission(mission);
// } catch (error) {
// console.error('Error during mission rollback:', error);
// // Continue with mission deletion even if rollback fails
// }
```
**⚠️ NON IMPLÉMENTÉ** :
- ❌ Le rollback N8N n'est pas appelé
- ❌ Les intégrations externes (Leantime, Outline, RocketChat, etc.) ne sont **PAS** supprimées
- ⚠️ Les ressources externes restent orphelines
**Méthode disponible mais non utilisée** :
- ✅ `N8nService.triggerMissionRollback()` existe dans `lib/services/n8n-service.ts`
- ✅ Webhook URL : `https://brain.slm-lab.net/webhook/mission-rollback`
- ❌ Mais n'est pas appelée dans le DELETE
**Solution proposée** :
```typescript
// Rollback N8N
const n8nService = new N8nService();
try {
await n8nService.triggerMissionRollback({
missionId: mission.id,
leantimeProjectId: mission.leantimeProjectId,
outlineCollectionId: mission.outlineCollectionId,
rocketChatChannelId: mission.rocketChatChannelId,
giteaRepositoryUrl: mission.giteaRepositoryUrl,
penpotProjectId: mission.penpotProjectId
});
} catch (error) {
console.error('Error during mission rollback:', error);
// Continue with mission deletion even if rollback fails
}
```
### 2.5 STEP 3 : Suppression de la Mission en Base de Données
**Lignes 346-349**
```typescript
// Suppression de la mission (CASCADE automatique)
await prisma.mission.delete({
where: { id: params.missionId }
});
```
**Schéma Prisma - Relations avec CASCADE** :
```prisma
model Mission {
// ...
attachments Attachment[]
missionUsers MissionUser[]
}
model Attachment {
mission Mission @relation(fields: [missionId], references: [id], onDelete: Cascade)
// ...
}
model MissionUser {
mission Mission @relation(fields: [missionId], references: [id], onDelete: Cascade)
// ...
}
```
**CASCADE automatique** :
- ✅ **MissionUsers** : Supprimés automatiquement (`onDelete: Cascade`)
- ✅ **Attachments** : Supprimés automatiquement (`onDelete: Cascade`)
- ❌ **Fichiers Minio** : **NON supprimés automatiquement** (pas de trigger)
**Points importants** :
- ✅ Une seule requête Prisma supprime la mission et toutes ses relations
- ✅ Atomicité : Si la suppression échoue, rien n'est supprimé
- ⚠️ **PROBLÈME** : Les fichiers Minio des attachments ne sont pas supprimés
### 2.6 Retour de Succès
**Lignes 351-358**
```typescript
return NextResponse.json({ success: true });
} catch (error) {
console.error('Error deleting mission:', error);
return NextResponse.json(
{ error: 'Failed to delete mission' },
{ status: 500 }
);
}
```
---
## 🔄 ÉTAPE 3 : Cascade Prisma
### Suppression Automatique des Relations
Quand `prisma.mission.delete()` est exécuté, Prisma supprime automatiquement :
1. **Tous les MissionUsers** associés
```sql
DELETE FROM "MissionUser" WHERE "missionId" = 'abc-123';
```
2. **Tous les Attachments** associés
```sql
DELETE FROM "Attachment" WHERE "missionId" = 'abc-123';
```
**⚠️ PROBLÈME MAJEUR** :
- ❌ Les fichiers Minio des attachments ne sont **PAS** supprimés
- ❌ Les fichiers restent dans Minio : `missions/{missionId}/attachments/*`
- ⚠️ **Orphelins** : Fichiers sans enregistrement en base
---
## 🧹 Problèmes Identifiés et Solutions
### Problème 1 : Logo non supprimé dans Minio
**Symptôme** : Le logo reste dans Minio après suppression
**Cause** : `deleteMissionLogo()` ne fait que logger, ne supprime pas
**Solution** :
```typescript
export async function deleteMissionLogo(
missionId: string,
logoPath: string
): Promise<void> {
const { DeleteObjectCommand } = await import('@aws-sdk/client-s3');
const normalizedPath = ensureMissionsPrefix(logoPath);
const minioPath = normalizedPath.replace(/^missions\//, '');
await s3Client.send(new DeleteObjectCommand({
Bucket: 'missions',
Key: minioPath
}));
}
```
### Problème 2 : Attachments non supprimés dans Minio
**Symptôme** : Les fichiers attachments restent dans Minio
**Cause** : Pas de suppression des fichiers avant la suppression Prisma
**Solution** :
```typescript
// Avant la suppression Prisma : récupérer les attachments
// (la mission est chargée sans la relation `attachments`, on interroge donc la table directement)
const attachments = await prisma.attachment.findMany({
  where: { missionId: params.missionId }
});

// Supprimer chaque fichier Minio
for (const attachment of attachments) {
  try {
    await deleteMissionAttachment(attachment.filePath);
  } catch (error) {
    console.error('Error deleting attachment file:', error);
    // Continue même si un fichier échoue
  }
}
```
### Problème 3 : Rollback N8N non implémenté
**Symptôme** : Les intégrations externes restent orphelines
**Cause** : Code commenté, non implémenté
**Solution** :
```typescript
// Décommenter et implémenter
const n8nService = new N8nService();
try {
await n8nService.triggerMissionRollback({
missionId: mission.id,
leantimeProjectId: mission.leantimeProjectId,
outlineCollectionId: mission.outlineCollectionId,
rocketChatChannelId: mission.rocketChatChannelId,
giteaRepositoryUrl: mission.giteaRepositoryUrl,
penpotProjectId: mission.penpotProjectId
});
} catch (error) {
console.error('Error during mission rollback:', error);
// Continue avec la suppression même si rollback échoue
}
```
---
## 📊 Résumé des Opérations
### Opérations Effectuées ✅
1. ✅ **Vérification authentification** : Session NextAuth
2. ✅ **Vérification permissions** : Créateur ou Admin
3. ✅ **Suppression Prisma Mission** : Avec cascade automatique
4. ✅ **Suppression Prisma MissionUsers** : Cascade automatique
5. ✅ **Suppression Prisma Attachments** : Cascade automatique
### Opérations NON Effectuées ❌
1. ❌ **Suppression logo Minio** : Fonction ne fait que logger
2. ❌ **Suppression attachments Minio** : Pas de code pour supprimer
3. ❌ **Rollback N8N** : Code commenté, non implémenté
---
## 🔍 Workflow Complet Corrigé (Proposé)
```typescript
export async function DELETE(...) {
// 1. Authentification
const session = await getServerSession(authOptions);
if (!session?.user) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
// 2. Récupération mission avec attachments
const mission = await prisma.mission.findUnique({
where: { id: params.missionId },
include: {
attachments: true, // Inclure les attachments
missionUsers: true
}
});
if (!mission) return NextResponse.json({ error: 'Mission not found' }, { status: 404 });
// 3. Vérification permissions
const isCreator = mission.creatorId === session.user.id;
const isAdmin = session.user.role?.includes('admin');
if (!isCreator && !isAdmin) return NextResponse.json({ error: 'Forbidden' }, { status: 403 });
// 4. Suppression logo Minio
if (mission.logo) {
try {
await deleteMissionLogo(mission.id, mission.logo); // À implémenter
} catch (error) {
console.error('Error deleting logo:', error);
// Continue
}
}
// 5. Suppression attachments Minio
if (mission.attachments && mission.attachments.length > 0) {
for (const attachment of mission.attachments) {
try {
await deleteMissionAttachment(attachment.filePath);
} catch (error) {
console.error('Error deleting attachment:', error);
// Continue
}
}
}
// 6. Rollback N8N
const n8nService = new N8nService();
try {
await n8nService.triggerMissionRollback({
missionId: mission.id,
leantimeProjectId: mission.leantimeProjectId,
outlineCollectionId: mission.outlineCollectionId,
rocketChatChannelId: mission.rocketChatChannelId,
giteaRepositoryUrl: mission.giteaRepositoryUrl,
penpotProjectId: mission.penpotProjectId
});
} catch (error) {
console.error('Error during N8N rollback:', error);
// Continue même si rollback échoue
}
// 7. Suppression Prisma (CASCADE)
await prisma.mission.delete({
where: { id: params.missionId }
});
return NextResponse.json({ success: true });
}
```
---
## 📈 Ordre d'Exécution Recommandé
1. **Vérifications** (authentification, permissions, existence)
2. **Suppression fichiers Minio** (logo + attachments)
3. **Rollback N8N** (intégrations externes)
4. **Suppression Prisma** (mission + cascade automatique)
**Pourquoi cet ordre ?**
- ✅ Supprimer les fichiers avant la base pour éviter les orphelins
- ✅ Rollback N8N avant suppression Prisma pour avoir les IDs
- ✅ Suppression Prisma en dernier (point de non-retour)
---
## ⚠️ Points d'Attention
### 1. Atomicité
**Problème** : Si une étape échoue, les précédentes sont déjà faites
**Solution** : Transaction Prisma + Rollback manuel si erreur
### 2. Performance
**Problème** : Suppression séquentielle des fichiers Minio
**Solution** : `Promise.all()` pour suppressions parallèles
### 3. Gestion d'Erreurs
**Problème** : Continue même si certaines suppressions échouent
**Solution** : Décider si on continue ou on rollback selon criticité
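Esquisse (hypothétique) combinant les points 2 et 3 : suppressions Minio en parallèle avec `Promise.allSettled`, en continuant malgré les échecs individuels :
```typescript
// Récupère les attachments puis supprime leurs fichiers Minio en parallèle
const attachments = await prisma.attachment.findMany({
  where: { missionId: params.missionId }
});

const results = await Promise.allSettled(
  attachments.map((a) => deleteMissionAttachment(a.filePath))
);

results.forEach((r, i) => {
  if (r.status === 'rejected') {
    console.error('Suppression Minio échouée :', attachments[i].filePath, r.reason);
  }
});
```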
---
## 🔍 Debugging
### Logs à surveiller :
1. **Début suppression** : `DELETE /api/missions/[id]`
2. **Permissions** : `isCreator` / `isAdmin`
3. **Suppression logo** : `Deleting mission logo`
4. **Suppression attachments** : `Deleting mission attachment`
5. **Rollback N8N** : `Triggering n8n rollback workflow`
6. **Suppression Prisma** : `prisma.mission.delete()`
### Vérifications manuelles :
```bash
# Check Minio
# Go to https://dome-api.slm-lab.net
# Bucket: missions
# Verify that the {missionId} folders are gone
# Check Prisma
npx prisma studio
# Verify that the mission and its relations are deleted
```
---
**Document generated on**: $(date)
**Version**: 1.0
**Author**: Full codebase analysis

View File

@ -1,308 +0,0 @@
# Fix: Saving the N8N Integration IDs
## 🔍 Problem Identified
When a mission is deleted, the N8N webhook receives empty IDs:
```json
{
"repoName": "",
"leantimeProjectId": 0,
"documentationCollectionId": "",
"rocketchatChannelId": ""
}
```
**Cause**: The IDs returned by N8N when the integrations are created were **never saved to the database**.
### Problematic Workflow
```
1. Frontend → POST /api/missions
2. Backend creates the mission in Prisma
3. Backend uploads files to Minio
4. Backend → POST N8N webhook (mission-created)
5. N8N creates the integrations (Gitea, Leantime, Outline, RocketChat)
6. N8N → POST /mission-created ❌ ENDPOINT DID NOT EXIST
7. IDs never saved ❌
```
### Consequence
During deletion:
- The IDs are `null` in the database
- Empty values are sent to N8N
- N8N cannot delete/close the integrations
- The external resources are left orphaned
---
## ✅ Implemented Solution
### 1. `/mission-created` Endpoint Created
**File**: `app/api/missions/mission-created/route.ts`
**Features** (a condensed sketch of the handler follows below):
- ✅ Receives the IDs of the integrations created by N8N
- ✅ Checks the API key (`x-api-key` header)
- ✅ Finds the mission by `name` + `creatorId`
- ✅ Updates the mission with the IDs
- ✅ Maps the fields correctly:
  - `gitRepoUrl` → `giteaRepositoryUrl`
  - `documentationCollectionId` → `outlineCollectionId`
  - `rocketchatChannelId` → `rocketChatChannelId`
  - `leantimeProjectId` → `leantimeProjectId` (converted to string)
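A condensed sketch of the handler, with error handling trimmed; the shared `prisma` client import path is an assumption:
```typescript
// app/api/missions/mission-created/route.ts (condensed sketch)
import { NextRequest, NextResponse } from 'next/server';
import { prisma } from '@/lib/prisma'; // assumed shared client

export async function POST(request: NextRequest) {
  // 1. API key check
  if (request.headers.get('x-api-key') !== process.env.N8N_API_KEY) {
    return NextResponse.json({ error: 'Invalid API key' }, { status: 401 });
  }

  // 2. Validate required fields
  const body = await request.json();
  if (!body.name || !body.creatorId) {
    return NextResponse.json({ error: 'name and creatorId are required' }, { status: 400 });
  }

  // 3. Find the mission by name + creatorId
  const mission = await prisma.mission.findFirst({
    where: { name: body.name, creatorId: body.creatorId },
    orderBy: { createdAt: 'desc' },
  });
  if (!mission) {
    return NextResponse.json({ error: 'Mission not found' }, { status: 404 });
  }

  // 4. Map the N8N field names onto our schema and persist
  const updated = await prisma.mission.update({
    where: { id: mission.id },
    data: {
      giteaRepositoryUrl: body.gitRepoUrl ?? mission.giteaRepositoryUrl,
      outlineCollectionId: body.documentationCollectionId ?? mission.outlineCollectionId,
      rocketChatChannelId: body.rocketchatChannelId ?? mission.rocketChatChannelId,
      leantimeProjectId:
        body.leantimeProjectId != null ? String(body.leantimeProjectId) : mission.leantimeProjectId,
    },
  });

  return NextResponse.json({
    success: true,
    message: 'Mission updated successfully',
    mission: updated,
  });
}
```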
### 2. Data Format
**N8N sends**:
```json
{
"name": "Mission Example",
"creatorId": "user-id",
"gitRepoUrl": "https://gite.slm-lab.net/alma/mission-example",
"leantimeProjectId": "123",
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789"
}
```
**The endpoint saves**:
```typescript
{
giteaRepositoryUrl: "https://gite.slm-lab.net/alma/mission-example",
leantimeProjectId: "123",
outlineCollectionId: "collection-456",
rocketChatChannelId: "channel-789"
}
```
### 3. Security
- ✅ API key check (`x-api-key` header)
- ✅ Validation of required fields (`name`, `creatorId`)
- ✅ Complete error handling
- ✅ Detailed logging for debugging
---
## 🔄 New Workflow
```
1. Frontend → POST /api/missions
2. Backend creates the mission in Prisma
3. Backend uploads files to Minio
4. Backend → POST N8N webhook (mission-created)
5. N8N creates the integrations (Gitea, Leantime, Outline, RocketChat)
6. N8N → POST /mission-created ✅ ENDPOINT NOW EXISTS
7. Backend saves the IDs ✅
8. Mission is complete with all IDs ✅
```
**During deletion**:
```
1. Frontend → DELETE /api/missions/[id]
2. Backend fetches the mission (with saved IDs)
3. Backend extracts/maps the data
4. Backend → POST N8N webhook (mission-delete)
5. N8N receives the IDs ✅
6. N8N deletes/closes the integrations ✅
```
---
## 📋 Request Format
### POST /api/missions/mission-created
**Headers**:
```
Content-Type: application/json
x-api-key: {N8N_API_KEY}
Authorization: Bearer {keycloak_token} (optional)
```
**Body** :
```json
{
"name": "Mission Example",
"creatorId": "user-uuid",
"gitRepoUrl": "https://gite.slm-lab.net/alma/mission-example",
"leantimeProjectId": "123",
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789",
"niveau": "default",
"intention": "...",
"description": "...",
"donneurDOrdre": "...",
"projection": "...",
"missionType": "remote"
}
```
**Success Response** (200):
```json
{
"success": true,
"message": "Mission updated successfully",
"mission": {
"id": "mission-uuid",
"name": "Mission Example",
"giteaRepositoryUrl": "https://gite.slm-lab.net/alma/mission-example",
"leantimeProjectId": "123",
"outlineCollectionId": "collection-456",
"rocketChatChannelId": "channel-789"
}
}
```
**Error Response** (400/404/500):
```json
{
"error": "Error message",
"details": "Detailed error information"
}
```
---
## ⚠️ Existing Missions
**Problem**: Missions created before this fix do not have their IDs saved.
**Possible solutions**:
### Option 1: Manual Migration
For each existing mission, retrieve the IDs from the external services and update them by hand.
### Option 2: Migration Script
Write a script (sketched below) that:
1. Lists all missions without IDs
2. Queries the external services (where possible)
3. Updates the missions
### Option 3: Re-creation
Delete and recreate the missions (if acceptable).
**Recommendation**: Option 1 for critical missions, Option 2 for a large number of missions.
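A sketch of what the Option 2 script could look like; `lookupExternalIds` is a hypothetical helper, since how to find the IDs depends on what each external API exposes:
```typescript
// scripts/backfill-integration-ids.ts (sketch, assuming a shared Prisma client)
import { prisma } from '@/lib/prisma';

// Hypothetical helper: query Gitea/Leantime/Outline/RocketChat by mission name
// and return only the IDs that could be found.
async function lookupExternalIds(name: string): Promise<Record<string, string>> {
  // TODO: implement per service if its API allows searching by name.
  return {};
}

async function main() {
  // 1. List missions missing at least one integration ID
  const missions = await prisma.mission.findMany({
    where: {
      OR: [
        { giteaRepositoryUrl: null },
        { leantimeProjectId: null },
        { outlineCollectionId: null },
        { rocketChatChannelId: null },
      ],
    },
  });
  console.log(`${missions.length} mission(s) missing integration IDs`);

  // 2. Query the external services and 3. update each mission
  for (const mission of missions) {
    const ids = await lookupExternalIds(mission.name);
    if (Object.keys(ids).length > 0) {
      await prisma.mission.update({ where: { id: mission.id }, data: ids });
    }
  }
}

main().finally(() => prisma.$disconnect());
```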
---
## 🧪 Tests
### Test 1: Mission Creation
1. Create a new mission via the frontend
2. Check that N8N calls `/mission-created`
3. Check that the mission in the database has the IDs saved:
```sql
SELECT id, name, giteaRepositoryUrl, leantimeProjectId,
outlineCollectionId, rocketChatChannelId
FROM Mission
WHERE name = 'Mission Test';
```
### Test 2: Mission Deletion
1. Delete a mission whose IDs were saved
2. Check that N8N receives the IDs:
```json
{
"repoName": "mission-example",
"leantimeProjectId": 123,
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789"
}
```
3. Check that N8N deletes/closes the integrations
### Test 3: API Key
1. Call `/mission-created` without `x-api-key` → 401
2. Call it with a wrong `x-api-key` → 401
3. Call it with the right `x-api-key` → 200
---
## 📝 Logs to Watch
### Creation
```
=== Mission Created Webhook Received ===
Received mission-created data: { ... }
Found mission: { id: "...", name: "..." }
Updating giteaRepositoryUrl: ...
Updating leantimeProjectId: ...
Mission updated successfully: { ... }
```
### Deletion
```
=== Starting N8N Deletion Workflow ===
Extracted repo name from URL: { url: "...", repoName: "..." }
Sending deletion data to N8N: { ... }
N8N Deletion Workflow Result: { success: true, ... }
```
---
## 🔧 Required Configuration
### Environment Variables
```env
N8N_API_KEY=your-api-key-here
NEXT_PUBLIC_API_URL=https://hub.slm-lab.net
```
### N8N Workflow
The N8N workflow must call:
- **URL**: `{{ MISSION_API_URL }}/mission-created`
- **Method**: POST
- **Headers**:
  - `Content-Type: application/json`
  - `x-api-key: {{ N8N_API_KEY }}`
  - `Authorization: Bearer {{ Keycloak Token }}` (optional)
---
## ✅ Checklist
- [x] `/mission-created` endpoint created
- [x] API key check implemented
- [x] Correct field mapping
- [x] Complete error handling
- [x] Detailed logging
- [ ] Manual tests performed
- [ ] Migration of existing missions (if needed)
- [ ] N8N documentation updated
---
**Fix date**: $(date)
**Version**: 1.0
**Modified files**:
- `app/api/missions/mission-created/route.ts` (new)

View File

@ -1,253 +0,0 @@
# Fix: Empty Integration IDs on Deletion
## 🔍 Problem Identified
When a mission is deleted, N8N receives empty IDs:
```json
{
"missionId": "cd0225cf-8dfd-4bf0-a20a-6aa9c04ebb42",
"name": "Creation",
"repoName": "",
"leantimeProjectId": 0,
"documentationCollectionId": "",
"rocketchatChannelId": "",
"giteaRepositoryUrl": null,
"outlineCollectionId": null,
"rocketChatChannelId": null
}
```
**Cause**: The IDs returned by N8N at creation time are **not saved to the database**.
---
## 🔍 Problem Analysis
### Current Flow
```
1. POST /api/missions → Creates the mission in Prisma
2. Uploads the logo to Minio
3. POST N8N webhook → N8N creates the integrations
4. N8N → POST /mission-created (with the IDs)
5. ❌ The endpoint looks the mission up by name + creatorId (may fail)
6. ❌ IDs never saved
7. ❌ On deletion → empty IDs
```
### Identified Problems
1. **Fragile mission lookup**: The `/mission-created` endpoint searches by `name` + `creatorId`, which can fail if:
   - Several missions share the same name
   - The name has changed
   - The creatorId does not match exactly
2. **missionId not sent**: We do not send the `missionId` to N8N, so N8N cannot send it back
3. **N8N may not return missionId**: Even if we send it, N8N has to return it in `/mission-created`
---
## ✅ Implemented Solutions
### 1. Send missionId to N8N
**File**: `app/api/missions/route.ts`
```typescript
const n8nData = {
...body,
missionId: mission.id, // ✅ Send missionId so N8N can return it
creatorId: userId,
logoPath: logoPath,
logoUrl: logoUrl,
config: { ... }
};
```
**Benefit**: N8N can now send the `missionId` back in `/mission-created`
### 2. Improve the Mission Lookup
**File**: `app/api/missions/mission-created/route.ts`
```typescript
// Prefer missionId if provided, otherwise use name + creatorId
let mission;
if (body.missionId) {
// ✅ Use missionId if provided (more reliable)
mission = await prisma.mission.findUnique({
where: { id: body.missionId }
});
} else if (body.name && body.creatorId) {
// Fallback to name + creatorId (for backward compatibility)
mission = await prisma.mission.findFirst({
where: {
name: body.name,
creatorId: body.creatorId
},
orderBy: { createdAt: 'desc' }
});
}
```
**Benefits**:
- ✅ Lookup by `missionId` (more reliable)
- ✅ Fallback to `name` + `creatorId` (backward compatibility)
- ✅ Improved error handling
---
## 📋 N8N → /mission-created Request Format
### Recommended Format (with missionId)
```json
{
"missionId": "cd0225cf-8dfd-4bf0-a20a-6aa9c04ebb42",
"name": "Creation",
"creatorId": "user-id",
"gitRepoUrl": "https://gite.slm-lab.net/alma/creation",
"leantimeProjectId": "123",
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789"
}
```
### Fallback Format (without missionId)
```json
{
"name": "Creation",
"creatorId": "user-id",
"gitRepoUrl": "https://gite.slm-lab.net/alma/creation",
"leantimeProjectId": "123",
"documentationCollectionId": "collection-456",
"rocketchatChannelId": "channel-789"
}
```
---
## 🔧 Action Required in N8N
### Modify the "Save Mission To API" Node
The N8N node must include `missionId` in the body:
**Before**:
```json
{
"name": "{{ name }}",
"creatorId": "{{ creatorId }}",
"gitRepoUrl": "{{ gitRepo.html_url }}",
...
}
```
**After**:
```json
{
"missionId": "{{ missionId }}", // ✅ Ajouter missionId
"name": "{{ name }}",
"creatorId": "{{ creatorId }}",
"gitRepoUrl": "{{ gitRepo.html_url }}",
"leantimeProjectId": "{{ leantimeProject.result[0] }}",
"documentationCollectionId": "{{ docCollection.data.id }}",
"rocketchatChannelId": "{{ rocketChatChannel.channel._id }}",
...
}
```
**Where to find missionId in N8N**:
- It is in the initial data: `{{ $node['Process Mission Data'].json.missionId }}`
- Or in the original body: `{{ $json.missionId }}`
---
## 🧪 Tests
### Test 1: Verify missionId is sent to N8N
1. Create a mission
2. Check the logs:
```
Sending to N8N: { missionId: "...", ... }
```
3. ✅ `missionId` must be present
### Test 2: Verify N8N sends missionId back
1. Check the N8N logs
2. Check that the "Save Mission To API" node includes `missionId`
3. ✅ `missionId` must be in the body sent to `/mission-created`
### Test 3: Verify the IDs are saved
1. Create a mission
2. Check the logs:
```
=== Mission Created Webhook Received ===
Looking up mission by ID: ...
Mission updated successfully: { ... }
```
3. Check the database:
```sql
SELECT id, name, giteaRepositoryUrl, leantimeProjectId,
outlineCollectionId, rocketChatChannelId
FROM Mission
WHERE id = '...';
```
4. ✅ The IDs must be present
### Test 4: Verify Deletion
1. Delete a mission whose IDs were saved
2. Check the logs:
```
Sending deletion data to N8N: {
repoName: "creation",
leantimeProjectId: 123,
...
}
```
3. ✅ The IDs must be present (not empty)
---
## 📝 Checklist
- [x] Send `missionId` to N8N at creation time
- [x] Improve the mission lookup in `/mission-created`
- [ ] **Modify the N8N workflow to include `missionId` in `/mission-created`**
- [ ] Test creation with `missionId`
- [ ] Test that the IDs are saved
- [ ] Test deletion with saved IDs
---
## ⚠️ Immediate Action Required
**Modify the N8N workflow** to include `missionId` in the "Save Mission To API" node:
1. Open the `NeahMissionCreate` N8N workflow
2. Find the "Save Mission To API" node
3. Add `missionId` to the body:
```json
{
"missionId": "={{ $node['Process Mission Data'].json.missionId }}",
...
}
```
4. Save and activate the workflow
---
**Date**: $(date)
**Version**: 1.1
**Modified files**:
- `app/api/missions/route.ts` (missionId added to n8nData)
- `app/api/missions/mission-created/route.ts` (lookup by missionId)

View File

@ -1,673 +0,0 @@
# Complete N8N Mapping - Mission Creation and Deletion
## 📋 Overview
This document describes the complete mapping between our API and the N8N workflows for mission creation and deletion, based on the actual shared workflows.
---
## 🔄 Creation Workflow - NeahMissionCreate
### Workflow Structure
```
Webhook (mission-created)
Process Mission Data
Get Keycloak Token
Process Token
Debug Service Data
Merge Paths
IF Run Integrations
├─ IF Needs Git Repository
│ ├─ Create Git Repository (if Gite or Calcul)
│ ├─ Create Readme
│ └─ Git Wiki
├─ Create Documentation Collection
├─ Create Leantime Project
│ └─ Leantime Avatar
└─ Create RocketChat Channel
Combine Results
Save Mission To API (POST /mission-created)
Process Results
Respond To Webhook
```
### Data Sent by Our API → N8N
**Endpoint**: `POST https://brain.slm-lab.net/webhook/mission-created`
**Format**:
```typescript
{
name: string,
oddScope: string[],
niveau: string,
intention: string,
missionType: string,
donneurDOrdre: string,
projection: string,
services: string[],
participation: string,
profils: string[],
guardians: {
"gardien-temps": userId,
"gardien-parole": userId,
"gardien-memoire": userId
},
volunteers: string[],
creatorId: string,
logo: {
data: "data:image/png;base64,...",
name: string,
type: string
},
attachments: Array<{
data: "data:...;base64,...",
name: string,
type: string
}>,
config: {
N8N_API_KEY: string,
MISSION_API_URL: string
}
}
```
### N8N Processing - Process Mission Data
The "Process Mission Data" node transforms the data into:
```javascript
{
missionOriginal: { ... }, // Original data
missionProcessed: {
name: "Mission Example",
sanitizedName: "mission-example", // Name sanitized for URLs
intention: "...",
description: "...",
startDate: "2024-01-01",
endDate: "2024-01-31",
missionType: "remote",
guardians: { ... },
volunteers: [ ... ],
profils: [ ... ],
services: ["Gite", "ArtLab"], // Détermine quelles intégrations créer
clientId: 2,
rocketChatUsernames: [userId1, userId2, ...], // Guardians + volunteers
logo: { data: "...", name: "...", type: "..." },
attachments: [ ... ]
},
config: {
GITEA_API_URL: "https://gite.slm-lab.net/api/v1",
GITEA_API_TOKEN: "...",
GITEA_OWNER: "alma",
LEANTIME_API_URL: "https://agilite.slm-lab.net",
LEANTIME_API_TOKEN: "...",
ROCKETCHAT_API_URL: "https://parole.slm-lab.net/",
ROCKETCHAT_AUTH_TOKEN: "...",
ROCKETCHAT_USER_ID: "...",
OUTLINE_API_URL: "https://chapitre.slm-lab.net/api",
OUTLINE_API_TOKEN: "...",
MISSION_API_URL: "https://hub.slm-lab.net",
// ... other config values
},
creatorId: "user-id"
}
```
### Integrations Created by N8N
#### 1. Gitea Repository (Conditional)
**Condition**: `services.includes('Gite') || services.includes('Calcul')`
**Node**: "Create Git Repository"
- **Method**: POST
- **URL**: `{{ GITEA_API_URL }}/user/repos`
- **Body**:
```json
{
"name": "{{ sanitizedName }}",
"private": true,
"auto_init": true,
"avatar_url": "{{ logo.data }}"
}
```
- **Result**: `{ html_url: "https://gite.slm-lab.net/alma/mission-example" }`
**Additional actions**:
- Create Readme: creates a README document in Outline
- Git Wiki: points the repo's external wiki to Outline
#### 2. Leantime Project
**Node**: "Create Leantime Project"
- **Method**: POST
- **URL**: `{{ LEANTIME_API_URL }}/api/jsonrpc`
- **Body**:
```json
{
"method": "leantime.rpc.Projects.Projects.addProject",
"jsonrpc": "2.0",
"id": 1,
"params": {
"values": {
"name": "{{ name }}",
"clientId": {{ clientId }},
"details": "{{ intention }}",
"type": "project",
"start": "{{ startDate }}",
"end": "{{ endDate }}",
"status": "open",
"psettings": "restricted",
"avatar": "{{ logo.data }}"
}
}
}
```
- **Result**: `{ result: [projectId] }` (array with 1 element)
**Additional action**:
- Leantime Avatar: updates the project's avatar
#### 3. Outline Collection
**Node**: "Create Documentation Collection"
- **Method**: POST
- **URL**: `{{ OUTLINE_API_URL }}/api/collections.create`
- **Body**:
```json
{
"name": "{{ sanitizedName }}",
"description": "{{ description }}",
"permission": "read",
"private": true
}
```
- **Result**: `{ data: { id: "collection-id", url: "/collection/..." } }`
#### 4. RocketChat Channel
**Node**: "Create RocketChat Channel"
- **Method**: POST
- **URL**: `{{ ROCKETCHAT_API_URL }}/api/v1/channels.create`
- **Body**:
```json
{
"name": "{{ sanitizedName }}",
"members": [{{ rocketChatUsernames }}],
"readOnly": false,
"avatarUrl": "{{ logo.data }}"
}
```
- **Result**: `{ channel: { _id: "channel-id", ... } }`
### Save Mission To API - Back to Our API
**Node**: "Save Mission To API"
- **Method**: POST
- **URL**: `{{ MISSION_API_URL }}/mission-created`
- **Headers**:
  - `Content-Type: application/json`
  - `Authorization: Bearer {{ Keycloak Token }}`
  - `x-api-key: {{ N8N_API_KEY }}`
- **Body**:
```json
{
"name": "{{ name }}",
"niveau": "{{ niveau }}",
"intention": "{{ intention }}",
"description": "{{ description }}",
"gitRepoUrl": "{{ gitRepo.html_url }}",
"leantimeProjectId": "{{ leantimeProject.result[0] }}",
"documentationCollectionId": "{{ docCollection.data.id }}",
"rocketchatChannelId": "{{ rocketChatChannel.channel._id }}",
"donneurDOrdre": "{{ donneurDOrdre }}",
"projection": "{{ projection }}",
"missionType": "{{ missionType }}",
"creatorId": "{{ creatorId }}"
}
```
**⚠️ IMPORTANT**: This `/mission-created` endpoint does **NOT** currently exist in our codebase. It should:
1. Receive the IDs of the created integrations
2. Update the mission in the database with those IDs
3. Map the fields:
   - `gitRepoUrl` → `giteaRepositoryUrl`
   - `documentationCollectionId` → `outlineCollectionId`
   - `rocketchatChannelId` → `rocketChatChannelId`
---
## 🗑️ Deletion Workflow - NeahMissionDelete_Pro
### Workflow Structure
```
Webhook Delete (mission-delete)
Process Delete Data
Get Keycloak Token
[In parallel]
├─ Delete Gitea Repo
├─ Close Leantime Project
├─ Delete Outline Collection
└─ Close RocketChat Channel
Combine Results
Save Deletion To API (POST /mission-deleted)
```
### Data Sent by Our API → N8N
**Endpoint**: `POST https://brain.slm-lab.net/webhook-test/mission-delete`
**Format**:
```typescript
{
missionId: string,
name: string,
repoName: string, // ✅ Extracted from giteaRepositoryUrl
leantimeProjectId: number | 0, // ✅ Converted to a number
documentationCollectionId: string, // ✅ Mapped from outlineCollectionId
rocketchatChannelId: string, // ✅ Mapped from rocketChatChannelId
// Original fields kept for reference
giteaRepositoryUrl: string | null,
outlineCollectionId: string | null,
rocketChatChannelId: string | null,
penpotProjectId: string | null,
config: {
N8N_API_KEY: string,
MISSION_API_URL: string
}
}
```
### N8N Processing - Process Delete Data
The "Process Delete Data" node transforms the data into:
```javascript
{
missionData: {
repoName: input.repoName || '',
leantimeId: input.leantimeProjectId || 0,
collectionId: input.documentationCollectionId || '',
rocketChatRoomId: input.rocketchatChannelId || ''
},
config: {
GITEA_API_URL: "https://gite.slm-lab.net/api/v1",
GITEA_API_TOKEN: "...",
GITEA_OWNER: "alma",
LEANTIME_API_URL: "https://agilite.slm-lab.net",
LEANTIME_API_TOKEN: "...",
ROCKETCHAT_API_URL: "https://parole.slm-lab.net/",
ROCKETCHAT_AUTH_TOKEN: "...",
ROCKETCHAT_USER_ID: "...",
OUTLINE_API_URL: "https://chapitre.slm-lab.net/api",
OUTLINE_API_TOKEN: "...",
MISSION_API_URL: "https://hub.slm-lab.net",
KEYCLOAK_BASE_URL: "https://connect.slm-lab.net",
KEYCLOAK_REALM: "cercle",
KEYCLOAK_CLIENT_ID: "lab",
KEYCLOAK_CLIENT_SECRET: "..."
}
}
```
### N8N Deletion Actions
#### 1. Delete Gitea Repo
**Node**: "Delete Gitea Repo"
- **Method**: DELETE
- **URL**: `{{ GITEA_API_URL }}/repos/{{ GITEA_OWNER }}/{{ repoName }}`
- **Headers**: `Authorization: token {{ GITEA_API_TOKEN }}`
- **ContinueOnFail**: `true`
- **Expected result**: Status 204 = success
#### 2. Close Leantime Project
**Node**: "Close Leantime Project"
- **Method**: POST
- **URL**: `{{ LEANTIME_API_URL }}/api/jsonrpc`
- **Body**:
```json
{
"method": "leantime.rpc.Projects.Projects.patch",
"jsonrpc": "2.0",
"id": 1,
"params": {
"id": {{ leantimeId }},
"params": { "status": "closed" }
}
}
```
- **ContinueOnFail**: `true`
- **Note**: The project is **closed** (status: "closed"), not deleted
#### 3. Delete Outline Collection
**Node**: "Delete Outline Collection"
- **Method**: POST
- **URL**: `{{ OUTLINE_API_URL }}/api/collections.delete`
- **Body**: `{ "id": "{{ collectionId }}" }`
- **ContinueOnFail**: `true`
- **Expected result**: Status 200 = success
#### 4. Close RocketChat Channel
**Node**: "Close RocketChat Channel"
- **Method**: POST
- **URL**: `{{ ROCKETCHAT_API_URL }}/api/v1/channels.close`
- **Body**: `{ "roomId": "{{ rocketChatRoomId }}" }`
- **ContinueOnFail**: `true`
- **Note**: The channel is **closed**, not deleted
### Combine Results
The "Combine Results" node combines the results:
```javascript
{
status: "deleted",
timestamp: "2024-01-01T12:00:00.000Z",
details: {
gitea: true || "already_deleted",
leantime: true || false,
outline: true || false,
rocketchat: true || false
}
}
```
### Save Deletion To API - Back to Our API
**Node**: "Save Deletion To API"
- **Method**: POST
- **URL**: `{{ MISSION_API_URL }}/mission-deleted`
- **Headers**:
  - `Authorization: Bearer {{ Keycloak Token }}`
- **Body**:
```json
{
"status": "archived",
"results": {
"gitea": true,
"leantime": true,
"outline": true,
"rocketchat": true
}
}
```
**⚠️ IMPORTANT**: This `/mission-deleted` endpoint does **NOT** currently exist in our codebase. It could be used to:
1. Confirm the deletion
2. Log the results
3. Clean up additional data if necessary
---
## 📊 Complete Field Mapping
### Creation (Our API → N8N → Return)
| Our Database | Sent to N8N | N8N Creates | Returned by N8N | Stored in Database |
|-----------|--------------|----------|------------|----------------|
| - | `name` | - | `name` | `name` |
| - | `services` | Determines integrations | - | `services` |
| - | `logo.data` | Avatar/Logo | - | `logo` (path) |
| - | - | Gitea Repo | `gitRepoUrl` | `giteaRepositoryUrl` |
| - | - | Leantime Project | `leantimeProjectId` | `leantimeProjectId` |
| - | - | Outline Collection | `documentationCollectionId` | `outlineCollectionId` |
| - | - | RocketChat Channel | `rocketchatChannelId` | `rocketChatChannelId` |
### Deletion (Our Database → N8N)
| Our Database | Extracted/Transformed | Sent to N8N | N8N Expects |
|-----------|-------------------|--------------|------------|
| `giteaRepositoryUrl` | Name extracted | `repoName` | `repoName` |
| `leantimeProjectId` | Converted to number | `leantimeProjectId` | `leantimeId` |
| `outlineCollectionId` | Direct | `documentationCollectionId` | `collectionId` |
| `rocketChatChannelId` | Direct | `rocketchatChannelId` | `rocketChatRoomId` |
---
## 🔧 Key Transformations
### 1. Extracting the Gitea Repository Name
**Problem**: Our database stores the full URL, N8N expects the name alone
**Solution**:
```typescript
// Format: https://gite.slm-lab.net/alma/mission-example
// or: https://gite.slm-lab.net/api/v1/repos/alma/mission-example
let repoName = '';
if (giteaRepositoryUrl) {
try {
const url = new URL(giteaRepositoryUrl);
const pathParts = url.pathname.split('/').filter(Boolean);
repoName = pathParts[pathParts.length - 1] || '';
} catch (error) {
const match = giteaRepositoryUrl.match(/\/([^\/]+)\/?$/);
repoName = match ? match[1] : '';
}
}
```
### 2. Field Mapping
**Creation**:
- N8N returns `gitRepoUrl` → we store `giteaRepositoryUrl`
- N8N returns `documentationCollectionId` → we store `outlineCollectionId`
- N8N returns `rocketchatChannelId` → we store `rocketChatChannelId`
**Deletion**:
- We store `giteaRepositoryUrl` → we send `repoName` (extracted)
- We store `outlineCollectionId` → we send `documentationCollectionId`
- We store `rocketChatChannelId` → we send `rocketchatChannelId`
### 3. Type Conversion
**Leantime Project ID** (see the sketch below):
- Stored in the database: `string | null`
- Sent to N8N: `number | 0` (converted)
- N8N expects: `number` (as `leantimeId`)
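For reference, the conversion done before building the deletion payload can be reduced to this small helper (a sketch, not the exact code in the route):
```typescript
// Convert the stored string | null ID into the number N8N expects.
function toLeantimeId(leantimeProjectId: string | null): number {
  return leantimeProjectId ? parseInt(leantimeProjectId, 10) : 0; // 0 when unset
}
```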
---
## ⚠️ Missing Endpoints
### 1. POST /mission-created
**Role**: Receive the IDs of the integrations created by N8N
**Expected format**:
```typescript
POST /mission-created
Headers: {
Authorization: "Bearer {keycloak_token}",
x-api-key: "{N8N_API_KEY}"
}
Body: {
name: string,
niveau: string,
intention: string,
description: string,
gitRepoUrl: string, // To be mapped to giteaRepositoryUrl
leantimeProjectId: string, // To be mapped to leantimeProjectId
documentationCollectionId: string, // To be mapped to outlineCollectionId
rocketchatChannelId: string, // To be mapped to rocketChatChannelId
donneurDOrdre: string,
projection: string,
missionType: string,
creatorId: string
}
```
**Required action**:
1. Find the mission by `name` + `creatorId`
2. Update it with the returned IDs
3. Map the fields correctly
### 2. POST /mission-deleted
**Role**: Confirm the deletion (optional)
**Expected format**:
```typescript
POST /mission-deleted
Headers: {
Authorization: "Bearer {keycloak_token}"
}
Body: {
status: "archived",
results: {
gitea: boolean,
leantime: boolean,
outline: boolean,
rocketchat: boolean
}
}
```
**Required action** (a minimal sketch follows below):
- Log the results
- Potentially clean up additional data
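Since the endpoint is optional, a minimal sketch that only validates and logs the results would be enough (route path and log format are assumptions):
```typescript
// app/api/missions/mission-deleted/route.ts (minimal sketch)
import { NextRequest, NextResponse } from 'next/server';

export async function POST(request: NextRequest) {
  const body = await request.json();

  // Log which external integrations N8N actually managed to clean up.
  console.log('=== Mission Deleted Webhook Received ===', {
    status: body.status,   // e.g. "archived"
    results: body.results, // { gitea, leantime, outline, rocketchat }
  });

  return NextResponse.json({ success: true });
}
```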
---
## 🔄 Complete Flow - Overview
### Creation
```
1. Frontend → POST /api/missions
2. Backend creates the mission in Prisma
3. Backend uploads files to Minio
4. Backend → POST N8N webhook (mission-created)
5. N8N creates the integrations (Gitea, Leantime, Outline, RocketChat)
6. N8N → POST /mission-created (⚠️ missing endpoint)
7. Backend updates the mission with the IDs (⚠️ not implemented)
```
### Deletion
```
1. Frontend → DELETE /api/missions/[id]
2. Backend fetches the mission
3. Backend extracts/maps the data
4. Backend → POST N8N webhook (mission-delete)
5. N8N deletes/closes the integrations
6. N8N → POST /mission-deleted (⚠️ missing endpoint)
7. Backend deletes the logo from Minio
8. Backend deletes the attachments from Minio
9. Backend deletes the mission in Prisma (CASCADE)
```
---
## 📝 Important Notes
### 1. Inconsistent Field Names
- **Creation**: N8N returns `gitRepoUrl`, `documentationCollectionId`, `rocketchatChannelId`
- **Deletion**: N8N expects `repoName`, `documentationCollectionId`, `rocketchatChannelId`
- **Our database**: stores `giteaRepositoryUrl`, `outlineCollectionId`, `rocketChatChannelId`
**Solution**: Consistent mapping in both directions
### 2. Missing /mission-created Endpoint
Currently, the IDs returned by N8N are **NOT** saved to the database. We would need to:
- Create the `/mission-created` endpoint
- Find the mission (by `name` + `creatorId` or `missionId`)
- Update it with the IDs
### 3. Conditional Services
- **Gitea**: created only if `services.includes('Gite') || services.includes('Calcul')`
- **Leantime**: always created
- **Outline**: always created
- **RocketChat**: always created
### 4. Error Handling
- All N8N nodes have `continueOnFail: true`
- Errors are logged but do not stop the workflow
- The results indicate which integrations succeeded/failed
---
## 🔍 Debugging Points
### Creation
1. **Check the data sent to N8N**:
```
Sending to N8N: { ... }
```
2. **Check the N8N response**:
```
N8N Workflow Result: { success: true, results: {...} }
```
3. **Check the /mission-created endpoint**:
- It must receive the IDs
- It must update the mission
### Deletion
1. **Check the repoName extraction**:
```
Extracted repo name from URL: { url: "...", repoName: "..." }
```
2. **Check the data sent to N8N**:
```
Sending deletion data to N8N: { ... }
```
3. **Check the N8N response**:
```
N8N Deletion Workflow Result: { success: true, results: {...} }
```
---
**Document generated on**: $(date)
**Version**: 1.0
**N8N workflows**:
- NeahMissionCreate (creation)
- NeahMissionDelete_Pro (deletion)

View File

@ -1,342 +0,0 @@
# Mapping N8N Workflow - Mission Deletion
## 📋 Overview
This document describes the mapping between our API's data and the format expected by the `NeahMissionDelete_Pro` N8N workflow.
---
## 🔄 N8N Workflow - Structure
### Workflow Nodes
1. **Webhook Delete**: receives POST on `/mission-delete`
2. **Process Delete Data**: transforms the input data
3. **Get Keycloak Token**: obtains an authentication token
4. **Delete Gitea Repo**: deletes the Gitea repository (continueOnFail: true)
5. **Close Leantime Project**: closes the Leantime project (continueOnFail: true)
6. **Delete Outline Collection**: deletes the Outline collection (continueOnFail: true)
7. **Close RocketChat Channel**: closes the RocketChat channel (continueOnFail: true)
8. **Combine Results**: combines the results of all deletions
9. **Save Deletion To API**: sends the results to the API
## 📊 Data Mapping
### Data Sent by Our API
```typescript
{
missionId: string,
name: string,
repoName: string, // Extracted from giteaRepositoryUrl
leantimeProjectId: number | null,
documentationCollectionId: string, // Mapped from outlineCollectionId
rocketchatChannelId: string, // Mapped from rocketChatChannelId
// Original fields kept for reference
giteaRepositoryUrl: string | null,
outlineCollectionId: string | null,
rocketChatChannelId: string | null,
penpotProjectId: string | null,
config: {
N8N_API_KEY: string,
MISSION_API_URL: string
}
}
```
### Data Expected by N8N (Process Delete Data)
The "Process Delete Data" node transforms the data into:
```javascript
{
missionData: {
repoName: input.repoName || '',
leantimeId: input.leantimeProjectId || 0,
collectionId: input.documentationCollectionId || '',
rocketChatRoomId: input.rocketchatChannelId || ''
},
config: {
GITEA_API_URL: "https://gite.slm-lab.net/api/v1",
GITEA_API_TOKEN: "...",
GITEA_OWNER: "alma",
LEANTIME_API_URL: "https://agilite.slm-lab.net",
LEANTIME_API_TOKEN: "...",
ROCKETCHAT_API_URL: "https://parole.slm-lab.net/",
ROCKETCHAT_AUTH_TOKEN: "...",
ROCKETCHAT_USER_ID: "...",
OUTLINE_API_URL: "https://chapitre.slm-lab.net/api",
OUTLINE_API_TOKEN: "...",
MISSION_API_URL: "https://hub.slm-lab.net",
KEYCLOAK_BASE_URL: "https://connect.slm-lab.net",
KEYCLOAK_REALM: "cercle",
KEYCLOAK_CLIENT_ID: "lab",
KEYCLOAK_CLIENT_SECRET: "..."
}
}
```
---
## 🔧 Transformations Performed
### 1. Extracting the Gitea Repository Name
**Problem**: Our database stores `giteaRepositoryUrl` (full URL), but N8N expects `repoName` (name only)
**Solution**: Extract the name from the URL
```typescript
// Possible formats:
// - https://gite.slm-lab.net/alma/repo-name
// - https://gite.slm-lab.net/api/v1/repos/alma/repo-name
let repoName = '';
if (mission.giteaRepositoryUrl) {
try {
const url = new URL(mission.giteaRepositoryUrl);
const pathParts = url.pathname.split('/').filter(Boolean);
repoName = pathParts[pathParts.length - 1] || '';
} catch (error) {
// Fallback: regex extraction
const match = mission.giteaRepositoryUrl.match(/\/([^\/]+)\/?$/);
repoName = match ? match[1] : '';
}
}
```
**Examples**:
- `https://gite.slm-lab.net/alma/mission-abc``mission-abc`
- `https://gite.slm-lab.net/api/v1/repos/alma/mission-xyz``mission-xyz`
### 2. Field Mapping
| Our Database | N8N Expects | Transformation |
|----------------------|------------|----------------|
| `giteaRepositoryUrl` | `repoName` | Name extracted from the URL |
| `leantimeProjectId` | `leantimeProjectId` | Direct (converted to number) |
| `outlineCollectionId` | `documentationCollectionId` | Direct mapping |
| `rocketChatChannelId` | `rocketchatChannelId` | Direct mapping (lowercase 'c') |
---
## 🎯 N8N Actions per Service
### 1. Gitea Repository
**Node**: "Delete Gitea Repo"
- **Method**: DELETE
- **URL**: `{{ GITEA_API_URL }}/repos/{{ GITEA_OWNER }}/{{ repoName }}`
- **Headers**: `Authorization: token {{ GITEA_API_TOKEN }}`
- **ContinueOnFail**: `true` (continues even on failure)
**Expected result**: Status 204 (No Content) = success
### 2. Leantime Project
**Node**: "Close Leantime Project"
- **Method**: POST
- **URL**: `{{ LEANTIME_API_URL }}/api/jsonrpc`
- **Headers**: `X-API-Key: {{ LEANTIME_API_TOKEN }}`
- **Body**:
```json
{
"method": "leantime.rpc.Projects.Projects.patch",
"jsonrpc": "2.0",
"id": 1,
"params": {
"id": {{ leantimeId }},
"params": { "status": "closed" }
}
}
```
- **ContinueOnFail**: `true`
**Note**: The project is **closed** (status: "closed"), not deleted
### 3. Outline Collection
**Node**: "Delete Outline Collection"
- **Method**: POST
- **URL**: `{{ OUTLINE_API_URL }}/api/collections.delete`
- **Headers**: `Authorization: Bearer {{ OUTLINE_API_TOKEN }}`
- **Body**: `{ "id": "{{ collectionId }}" }`
- **ContinueOnFail**: `true`
**Expected result**: Status 200 = success
### 4. RocketChat Channel
**Node**: "Close RocketChat Channel"
- **Method**: POST
- **URL**: `{{ ROCKETCHAT_API_URL }}/api/v1/channels.close`
- **Headers**:
  - `X-Auth-Token: {{ ROCKETCHAT_AUTH_TOKEN }}`
  - `X-User-Id: {{ ROCKETCHAT_USER_ID }}`
- **Body**: `{ "roomId": "{{ rocketChatRoomId }}" }`
- **ContinueOnFail**: `true`
**Note**: The channel is **closed**, not deleted
---
## 📤 N8N Response
### Response Format (Combine Results)
```javascript
{
status: "deleted",
timestamp: "2024-01-01T12:00:00.000Z",
details: {
gitea: true || "already_deleted",
leantime: true || false,
outline: true || false,
rocketchat: true || false
}
}
```
### Sending to the API (Save Deletion To API)
The workflow then sends the results to:
- **URL**: `{{ MISSION_API_URL }}/mission-deleted`
- **Method**: POST
- **Headers**: `Authorization: Bearer {{ Keycloak Token }}`
- **Body**:
```json
{
"status": "archived",
"results": {
"gitea": true,
"leantime": true,
"outline": true,
"rocketchat": true
}
}
```
---
## ⚠️ Points of Attention
### 1. Error Handling
- All deletion nodes have `continueOnFail: true`
- If one deletion fails, the workflow continues with the others
- The results indicate which deletions succeeded/failed
### 2. Behavioural Differences
- **Gitea**: the repository is fully deleted
- **Leantime**: the project is closed (status: "closed"), not deleted
- **Outline**: the collection is fully deleted
- **RocketChat**: the channel is closed, not deleted
### 3. Repo Name Extraction
- The extraction must handle different URL formats
- If the extraction fails, `repoName` will be empty
- The N8N workflow handles the case where `repoName` is empty
### 4. Field Mapping
- **documentationCollectionId**: mapped from `outlineCollectionId`
- **rocketchatChannelId**: mapped from `rocketChatChannelId` (note the lowercase 'c')
- **leantimeProjectId**: converted to a number (0 if null)
---
## 🔍 Debugging
### Logs to Watch
1. **Repo name extraction**:
```
Extracted repo name from URL: { url: "...", repoName: "..." }
```
2. **Data sent to N8N**:
```
Sending deletion data to N8N: { ... }
```
3. **N8N result**:
```
N8N Deletion Workflow Result: { success: true, results: {...} }
```
### Checks
1. **Was the repo name extracted correctly?**
- Check the extraction logs
- Expected format: plain name without the URL
2. **Is the field mapping correct?**
- `documentationCollectionId` = `outlineCollectionId`
- `rocketchatChannelId` = `rocketChatChannelId`
3. **Did N8N receive the data?**
- Check the N8N logs
- Check that the webhook was called
---
## 📝 Complete Example
### Data in the Database
```typescript
{
id: "abc-123",
name: "Mission Example",
giteaRepositoryUrl: "https://gite.slm-lab.net/alma/mission-example",
leantimeProjectId: "123",
outlineCollectionId: "collection-456",
rocketChatChannelId: "channel-789"
}
```
### Data Sent to N8N
```typescript
{
missionId: "abc-123",
name: "Mission Example",
repoName: "mission-example", // Extrait de l'URL
leantimeProjectId: 123, // Converti en number
documentationCollectionId: "collection-456", // Mappé
rocketchatChannelId: "channel-789", // Mappé (lowercase 'c')
giteaRepositoryUrl: "https://gite.slm-lab.net/alma/mission-example",
outlineCollectionId: "collection-456",
rocketChatChannelId: "channel-789",
config: {
N8N_API_KEY: "...",
MISSION_API_URL: "https://hub.slm-lab.net"
}
}
```
### Data Processed by N8N
```javascript
{
missionData: {
repoName: "mission-example",
leantimeId: 123,
collectionId: "collection-456",
rocketChatRoomId: "channel-789"
},
config: { ... }
}
```
---
**Document generated on**: $(date)
**Version**: 1.0
**N8N workflow**: NeahMissionDelete_Pro
**Webhook URL**: https://brain.slm-lab.net/webhook-test/mission-delete

View File

@ -1,129 +0,0 @@
# Navigation Bar Time Integration
## 🎯 Overview
The navigation bar (`components/main-nav.tsx`) currently displays a static time that doesn't refresh. This document outlines how to integrate it into the unified refresh system.
## 🔍 Current Issue
**File**: `components/main-nav.tsx` (lines 228-231)
```typescript
// Current code - STATIC (doesn't refresh)
const now = new Date();
const formattedDate = format(now, "d MMMM yyyy", { locale: fr });
const formattedTime = format(now, "HH:mm");
```
**Problem**: Time is calculated once when component renders and never updates.
## ✅ Solution
### Step 1: Create Time Component
**File**: `components/main-nav-time.tsx` (✅ Already created)
This component (sketched just below):
- Uses `useState` to track current time
- Uses `useUnifiedRefresh` hook for 1-second updates
- Properly cleans up on unmount
- No API calls needed (client-side only)
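A sketch of what `main-nav-time.tsx` contains, based on the behaviour described here and the refresh configuration shown later in this document; the import paths for `useUnifiedRefresh` and `REFRESH_INTERVALS` are assumptions:
```typescript
// components/main-nav-time.tsx (sketch)
'use client';

import { useState } from 'react';
import { format } from 'date-fns';
import { fr } from 'date-fns/locale';
import { useUnifiedRefresh } from '@/hooks/use-unified-refresh'; // assumed path
import { REFRESH_INTERVALS } from '@/lib/refresh-intervals';     // assumed path

export function MainNavTime() {
  const [currentTime, setCurrentTime] = useState(new Date());

  // Register with the unified refresh manager: tick once per second.
  useUnifiedRefresh({
    resource: 'navbar-time',
    interval: REFRESH_INTERVALS.NAVBAR_TIME, // 1000ms
    enabled: true,
    onRefresh: async () => setCurrentTime(new Date()),
    priority: 'high',
  });

  return (
    <div className="hidden md:flex flex-col items-center">
      <div className="text-white/80 text-xs">
        {format(currentTime, 'd MMMM yyyy', { locale: fr })}
      </div>
      <div className="text-white text-sm font-medium">
        {format(currentTime, 'HH:mm')}
      </div>
    </div>
  );
}
```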
### Step 2: Update MainNav Component
**File**: `components/main-nav.tsx`
**Changes needed**:
1. **Import the new component**:
```typescript
import { MainNavTime } from './main-nav-time';
```
2. **Remove static time code** (lines 228-231):
```typescript
// DELETE THESE LINES:
// Format current date and time
const now = new Date();
const formattedDate = format(now, "d MMMM yyyy", { locale: fr });
const formattedTime = format(now, "HH:mm");
```
3. **Replace time display** (lines 294-298):
```typescript
// BEFORE:
{/* Center - Date and Time */}
<div className="hidden md:flex flex-col items-center">
<div className="text-white/80 text-xs">{formattedDate}</div>
<div className="text-white text-sm font-medium">{formattedTime}</div>
</div>
// AFTER:
{/* Center - Date and Time */}
<MainNavTime />
```
### Step 3: Verify Integration
After changes:
- ✅ Time updates every second
- ✅ Uses unified refresh system
- ✅ Proper cleanup on unmount
- ✅ No memory leaks
- ✅ Consistent with other widgets
## 📊 Benefits
1. **Real-time clock**: Time updates every second
2. **Unified system**: Uses same refresh manager as widgets
3. **Memory safe**: Proper cleanup prevents leaks
4. **Consistent**: Same pattern as other components
5. **Maintainable**: Centralized refresh logic
## 🔧 Technical Details
### Refresh Configuration
- **Resource**: `navbar-time`
- **Interval**: 1000ms (1 second)
- **Priority**: `high` (real-time display)
- **API Calls**: None (client-side only)
- **Cleanup**: Automatic via `useUnifiedRefresh`
### Integration with Refresh Manager
The time component registers with the refresh manager:
```typescript
useUnifiedRefresh({
resource: 'navbar-time',
interval: REFRESH_INTERVALS.NAVBAR_TIME, // 1000ms
enabled: true, // Always enabled
onRefresh: async () => {
setCurrentTime(new Date());
},
priority: 'high',
});
```
## ✅ Implementation Checklist
- [x] Create `components/main-nav-time.tsx`
- [x] Add `NAVBAR_TIME` to refresh intervals
- [x] Add `navbar-time` to refreshable resources
- [ ] Update `components/main-nav.tsx` to use new component
- [ ] Test time updates correctly
- [ ] Verify cleanup on unmount
- [ ] Test with multiple tabs
## 🎯 Expected Result
After implementation:
- Time updates smoothly every second
- No performance impact
- No memory leaks
- Consistent with unified refresh system
---
*Last Updated: Navbar Time Integration Guide*

View File

@ -1,156 +0,0 @@
# Fix for 502 Error - Headers Too Large (Nginx)
## 🔍 Problem Identified
**Nginx error**:
```
upstream sent too big header while reading response header from upstream
```
**Cause**: The NextAuth session cookie is too large (> 4KB, the Nginx default). The JWT contains:
- `accessToken` (Keycloak) - ~1-2KB
- `refreshToken` (Keycloak) - ~1-2KB
- `idToken` (Keycloak) - ~1-2KB
- User data (roles, etc.) - ~500B-1KB
- **Total**: ~4-7KB, which exceeds the default Nginx limit
## ✅ Solutions
### Solution 1: Increase the Nginx Limit (RECOMMENDED)
**File**: Nginx configuration (usually `/etc/nginx/sites-available/hub.slm-lab.net` or similar)
**Add inside the `server` or `location` block**:
```nginx
server {
# ... other config ...
# Increase the maximum header size
proxy_buffer_size 16k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 32k;
large_client_header_buffers 4 32k;
# Specifically for response headers
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 128;
# ... rest of the config ...
}
```
**OR**, for a simpler option, add only:
```nginx
server {
# ... other config ...
# Increase the header limit
large_client_header_buffers 4 32k;
# ... rest of the config ...
}
```
**Then reload Nginx**:
```bash
sudo nginx -t # Check the config
sudo systemctl reload nginx # Or sudo service nginx reload
```
### Solution 2: Reduce the JWT Size (ALTERNATIVE)
If Nginx cannot be modified, the JWT can be shrunk by not storing every token.
**Change**: `app/api/auth/options.ts`
**Option A**: Do not store `idToken` in the JWT (if it is not needed)
```typescript
// In the JWT callback
token.idToken = account.id_token ?? ''; // ❌ Remove this line
```
**Option B**: Store only the tokens that are actually needed
```typescript
// Store only accessToken and refreshToken
// idToken can be fetched from Keycloak again if needed
```
**Note**: This solution reduces functionality. Solution 1 is preferable.
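A sketch of how Option B might look, written as a standalone `jwt` callback (the exact token fields depend on the existing `options.ts`):
```typescript
import type { Account } from 'next-auth';
import type { JWT } from 'next-auth/jwt';

// Option B sketch: keep only the tokens the app actually uses server-side.
export async function jwtCallback({ token, account }: { token: JWT; account?: Account | null }) {
  if (account) {
    token.accessToken = account.access_token ?? '';
    token.refreshToken = account.refresh_token ?? '';
    // idToken is intentionally NOT stored, to keep the session cookie small;
    // it can be fetched again from Keycloak if ever needed.
  }
  return token;
}
```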
## 🔧 Recommended Complete Nginx Configuration
```nginx
server {
listen 443 ssl http2;
server_name hub.slm-lab.net;
# ... SSL config ...
# Increase the limits for large NextAuth headers
proxy_buffer_size 16k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 32k;
large_client_header_buffers 4 32k;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
location / {
proxy_pass http://172.16.0.102:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
# Headers for NextAuth
proxy_set_header Cookie $http_cookie;
}
}
```
## 📊 Verification
**After changing the Nginx config**:
1. **Test the config**:
```bash
sudo nginx -t
```
2. **Reload Nginx**:
```bash
sudo systemctl reload nginx
```
3. **Test the login**:
- Sign in via Keycloak
- Check that the 502 error no longer occurs
- Check the Nginx logs to confirm
## 🎯 Technical Cause
NextAuth creates a JWT cookie that contains:
- The JWT encrypted with `NEXTAUTH_SECRET`
- All the Keycloak tokens inside that JWT
- A total size that can exceed 4KB
Nginx has a default 4KB limit for response headers. When Next.js tries to return a cookie larger than 4KB, Nginx rejects it with "upstream sent too big header".
## ✅ Immediate Solution
**Action**: Update the Nginx configuration to raise `large_client_header_buffers` to at least `4 32k` or `8 16k`.
**Impact**: Immediately resolves the 502 error.
---
**Document created on**: $(date)
**Priority**: HIGH - this is the cause of the 502 error

View File

@ -1,548 +0,0 @@
# Notification and Widget Update System - Complete File & Route Analysis
## 📋 Table of Contents
1. [Notification System](#notification-system)
2. [Widget Update System](#widget-update-system)
3. [API Routes](#api-routes)
4. [Components](#components)
5. [Services & Libraries](#services--libraries)
6. [Hooks](#hooks)
7. [Types](#types)
---
## 🔔 Notification System
### API Routes
#### 1. **GET `/api/notifications`**
- **File**: `app/api/notifications/route.ts`
- **Purpose**: Fetch paginated notifications for authenticated user
- **Query Parameters**:
- `page` (default: 1)
- `limit` (default: 20, max: 100)
- **Response**:
```json
{
"notifications": Notification[],
"page": number,
"limit": number,
"total": number
}
```
- **Cache**: 30 seconds client-side cache
- **Authentication**: Required (session-based)
#### 2. **GET `/api/notifications/count`**
- **File**: `app/api/notifications/count/route.ts`
- **Purpose**: Get notification count (total and unread) for authenticated user
- **Response**:
```json
{
"total": number,
"unread": number,
"sources": {
[source]: {
"total": number,
"unread": number
}
}
}
```
- **Cache**: 10 seconds client-side cache
- **Authentication**: Required
#### 3. **POST `/api/notifications/[id]/read`**
- **File**: `app/api/notifications/[id]/read/route.ts`
- **Purpose**: Mark a specific notification as read
- **Parameters**:
- `id` (path parameter): Notification ID (format: `source-sourceId`)
- **Response**:
```json
{
"success": boolean
}
```
- **Authentication**: Required
#### 4. **POST `/api/notifications/read-all`**
- **File**: `app/api/notifications/read-all/route.ts`
- **Purpose**: Mark all notifications as read for authenticated user
- **Response**:
```json
{
"success": boolean
}
```
- **Authentication**: Required
#### 5. **GET `/api/debug/notifications`**
- **File**: `app/api/debug/notifications/route.ts`
- **Purpose**: Debug endpoint to test notification system
- **Response**: Detailed debug information including:
- Environment variables status
- User information
- Notification service test results
- Performance metrics
- **Authentication**: Required
### Services
#### 1. **NotificationService** (Singleton)
- **File**: `lib/services/notifications/notification-service.ts`
- **Purpose**: Core notification aggregation service (usage sketch below)
- **Features**:
- Multi-source notification aggregation (adapter pattern)
- Redis caching (30s for counts, 5min for lists)
- Background refresh scheduling
- Cache invalidation on read operations
- Lock mechanism to prevent concurrent refreshes
- **Methods**:
- `getInstance()`: Get singleton instance
- `getNotifications(userId, page, limit)`: Fetch notifications
- `getNotificationCount(userId)`: Get notification counts
- `markAsRead(userId, notificationId)`: Mark notification as read
- `markAllAsRead(userId)`: Mark all as read
- `invalidateCache(userId)`: Invalidate user caches
- `scheduleBackgroundRefresh(userId)`: Schedule background refresh
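As a usage sketch, this is roughly how a route handler consumes the singleton (the `authOptions` import path is an assumption, and `total` is approximated here):
```typescript
// Sketch of a route handler using the NotificationService singleton.
import { NextResponse } from 'next/server';
import { getServerSession } from 'next-auth';
import { authOptions } from '@/app/api/auth/options'; // assumed path
import { NotificationService } from '@/lib/services/notifications/notification-service';

export async function GET(request: Request) {
  const session = await getServerSession(authOptions);
  if (!session?.user?.id) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
  }

  const { searchParams } = new URL(request.url);
  const page = Number(searchParams.get('page') ?? '1');
  const limit = Math.min(Number(searchParams.get('limit') ?? '20'), 100);

  const service = NotificationService.getInstance();
  const notifications = await service.getNotifications(session.user.id, page, limit);

  // total is approximated from the current page in this sketch.
  return NextResponse.json({ notifications, page, limit, total: notifications.length });
}
```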
#### 2. **NotificationAdapter Interface**
- **File**: `lib/services/notifications/notification-adapter.interface.ts`
- **Purpose**: Interface for notification source adapters
- **Methods**:
- `getNotifications(userId, page?, limit?)`: Fetch notifications
- `getNotificationCount(userId)`: Get counts
- `markAsRead(userId, notificationId)`: Mark as read
- `markAllAsRead(userId)`: Mark all as read
- `isConfigured()`: Check if adapter is configured
#### 3. **LeantimeAdapter** (Implementation)
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Purpose**: Leantime notification source adapter
- **Features**:
- Fetches notifications from Leantime API via JSON-RPC
- Maps Leantime user IDs by email
- Transforms Leantime notifications to unified format
- Supports marking notifications as read
- **Configuration**:
- `LEANTIME_API_URL` environment variable
- `LEANTIME_TOKEN` environment variable
### Components
#### 1. **NotificationBadge**
- **File**: `components/notification-badge.tsx`
- **Purpose**: Notification bell icon with badge and dropdown
- **Features**:
- Displays unread count badge
- Dropdown menu with recent notifications
- Manual refresh button
- Mark as read functionality
- Mark all as read functionality
- Source badges (e.g., "Agilité" for Leantime)
- Links to source systems
- Error handling and retry
- **Used in**: `components/main-nav.tsx`
#### 2. **MainNav** (Notification Integration)
- **File**: `components/main-nav.tsx`
- **Purpose**: Main navigation bar with notification badge
- **Notification Features**:
- Includes `<NotificationBadge />` component
- Browser notification permission handling
- User status-based notification management
### Hooks
#### 1. **useNotifications**
- **File**: `hooks/use-notifications.ts`
- **Purpose**: React hook for notification management (usage example below)
- **Features**:
- Automatic polling (60 seconds interval)
- Rate limiting (5 seconds minimum between fetches)
- Debounced count fetching (300ms)
- Manual refresh support
- Mount/unmount lifecycle management
- Error handling
- **Returns**:
```typescript
{
notifications: Notification[],
notificationCount: NotificationCount,
loading: boolean,
error: string | null,
fetchNotifications: (page?, limit?) => Promise<void>,
fetchNotificationCount: () => Promise<void>,
markAsRead: (notificationId: string) => Promise<boolean>,
markAllAsRead: () => Promise<boolean>
}
```
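A minimal component sketch wired to the hook as described (markup intentionally simple):
```typescript
// Sketch: a simple unread counter driven by useNotifications.
'use client';

import { useNotifications } from '@/hooks/use-notifications';

export function UnreadCounter() {
  const { notificationCount, loading, error, markAllAsRead } = useNotifications();

  if (loading) return <span>…</span>;
  if (error) return <span title={error}>!</span>;

  return (
    <button onClick={() => markAllAsRead()}>
      {notificationCount.unread} unread
    </button>
  );
}
```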
### Types
#### 1. **Notification Types**
- **File**: `lib/types/notification.ts`
- **Interfaces** (written out as code after this list):
- `Notification`: Main notification interface
- `id`: string (format: `source-sourceId`)
- `source`: 'leantime' | 'nextcloud' | 'gitea' | 'dolibarr' | 'moodle'
- `sourceId`: string
- `type`: string
- `title`: string
- `message`: string
- `link?`: string
- `isRead`: boolean
- `timestamp`: Date
- `priority`: 'low' | 'normal' | 'high'
- `user`: { id: string, name?: string }
- `metadata?`: Record<string, any>
- `NotificationCount`: Count interface
- `total`: number
- `unread`: number
- `sources`: Record<string, { total: number, unread: number }>
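The same definitions written out as TypeScript for quick reference (a sketch mirroring the list above, not necessarily the exact file contents):
```typescript
// lib/types/notification.ts - shapes as described above (sketch)
export interface Notification {
  id: string; // format: `source-sourceId`
  source: 'leantime' | 'nextcloud' | 'gitea' | 'dolibarr' | 'moodle';
  sourceId: string;
  type: string;
  title: string;
  message: string;
  link?: string;
  isRead: boolean;
  timestamp: Date;
  priority: 'low' | 'normal' | 'high';
  user: { id: string; name?: string };
  metadata?: Record<string, any>;
}

export interface NotificationCount {
  total: number;
  unread: number;
  sources: Record<string, { total: number; unread: number }>;
}
```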
---
## 🎨 Widget Update System
### Dashboard Widgets
The main dashboard (`app/page.tsx`) contains the following widgets:
1. **QuoteCard** - Daily quote widget
2. **Calendar** - Upcoming events widget
3. **News** - News articles widget
4. **Duties** - Tasks/Devoirs widget (Leantime)
5. **Email** - Email inbox widget
6. **Parole** - Chat messages widget (Rocket.Chat)
### Widget Components & Update Mechanisms
#### 1. **Calendar Widget**
- **Files**:
- `components/calendar.tsx` (Main dashboard widget)
- `components/calendar-widget.tsx` (Alternative implementation)
- `components/calendar/calendar-widget.tsx` (Calendar-specific widget)
- **Update Mechanism**:
- **Manual Refresh**: Refresh button in header
- **Auto Refresh**: Every 5 minutes (300000ms interval)
- **API Endpoint**: `/api/calendars?refresh=true`
- **Features**:
- Fetches calendars with events
- Filters upcoming events (today and future)
- Sorts by date (oldest first)
- Shows up to 7 events
- Displays calendar color coding
- **State Management**:
- `useState` for events, loading, error
- `useEffect` for initial fetch and interval setup
#### 2. **News Widget**
- **File**: `components/news.tsx`
- **Update Mechanism**:
- **Manual Refresh**: Refresh button in header
- **Initial Load**: On component mount when authenticated
- **API Endpoint**: `/api/news?limit=100` or `/api/news?refresh=true&limit=100`
- **Features**:
- Fetches up to 100 news articles
- Displays article count
- Click to open in new tab
- Scrollable list (max-height: 400px)
- **State Management**:
- `useState` for news, loading, error, refreshing
- `useEffect` for initial fetch on authentication
#### 3. **Duties Widget (Tasks)**
- **File**: `components/flow.tsx`
- **Update Mechanism**:
- **Manual Refresh**: Refresh button in header
- **Initial Load**: On component mount
- **API Endpoint**: `/api/leantime/tasks?refresh=true`
- **Features**:
- Fetches tasks from Leantime
- Filters out completed tasks (status 5)
- Sorts by due date (oldest first)
- Shows up to 7 tasks
- Displays task status badges
- Links to Leantime ticket view
- **State Management**:
- `useState` for tasks, loading, error, refreshing
- `useEffect` for initial fetch
#### 4. **Email Widget**
- **File**: `components/email.tsx`
- **Update Mechanism**:
- **Manual Refresh**: Refresh button in header
- **Initial Load**: On component mount
- **API Endpoint**: `/api/courrier?folder=INBOX&page=1&perPage=5` (+ `&refresh=true` for refresh)
- **Features**:
- Fetches 5 most recent emails from INBOX
- Sorts by date (most recent first)
- Shows read/unread status
- Displays sender, subject, date
- Link to full email view (`/courrier`)
- **State Management**:
- `useState` for emails, loading, error, mailUrl
- `useEffect` for initial fetch
#### 5. **Parole Widget (Chat Messages)**
- **File**: `components/parole.tsx`
- **Update Mechanism**:
- **Manual Refresh**: Refresh button in header
- **Auto Polling**: Every 30 seconds (30000ms interval)
- **Initial Load**: On authentication
- **API Endpoint**: `/api/rocket-chat/messages` (+ `?refresh=true` for refresh)
- **Features**:
- Fetches recent chat messages from Rocket.Chat
- Displays sender avatar, name, message
- Shows room/channel information
- Click to navigate to full chat (`/parole`)
- Authentication check with sign-in prompt
- **State Management**:
- `useState` for messages, loading, error, refreshing
- `useEffect` for initial fetch and polling setup
- Session status checking
#### 6. **QuoteCard Widget**
- **File**: `components/quote-card.tsx`
- **Update Mechanism**: (To be verified - likely static or daily update)
### Widget Update Patterns
#### Common Update Mechanisms:
1. **Manual Refresh**:
- All widgets have a refresh button in their header
- Triggers API call with `refresh=true` parameter
- Shows loading/spinning state during refresh
2. **Auto Refresh/Polling**:
- **Calendar**: 5 minutes interval
- **Parole**: 30 seconds interval
- Others: On component mount only
3. **Session-Based Loading**:
- Widgets check authentication status
- Only fetch data when `status === 'authenticated'`
- Show loading state during authentication check
4. **Error Handling**:
- All widgets display error messages
- Retry buttons available
- Graceful degradation (empty states)
5. **State Management**:
- All widgets use React `useState` hooks
- Loading states managed locally
- Error states managed locally (a generic sketch of this shared pattern follows below)
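A generic sketch of the pattern these widgets share; the endpoint and rendering are placeholders:
```typescript
// Sketch of the common widget pattern: session gate, initial fetch,
// manual refresh with ?refresh=true, and local loading/error state.
'use client';

import { useEffect, useState } from 'react';
import { useSession } from 'next-auth/react';

export function ExampleWidget() {
  const { status } = useSession();
  const [items, setItems] = useState<unknown[]>([]);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  const fetchItems = async (refresh = false) => {
    try {
      setLoading(true);
      const res = await fetch(`/api/example${refresh ? '?refresh=true' : ''}`); // placeholder endpoint
      if (!res.ok) throw new Error(`HTTP ${res.status}`);
      setItems(await res.json());
      setError(null);
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Unknown error');
    } finally {
      setLoading(false);
    }
  };

  // Only fetch once the user is authenticated.
  useEffect(() => {
    if (status === 'authenticated') fetchItems();
  }, [status]);

  return (
    <div>
      <button onClick={() => fetchItems(true)} disabled={loading}>Refresh</button>
      {error ? <p>{error}</p> : <ul>{items.map((_, i) => <li key={i}>…</li>)}</ul>}
    </div>
  );
}
```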
### Related API Routes for Widgets
#### Calendar
- **GET `/api/calendars`**: Fetch calendars with events
- **GET `/api/calendars/[id]/events`**: Fetch events for specific calendar
- **GET `/api/calendars/[id]`**: Get calendar details
#### News
- **GET `/api/news`**: Fetch news articles
- Query params: `limit`, `refresh`
#### Tasks (Leantime)
- **GET `/api/leantime/tasks`**: Fetch tasks
- Query params: `refresh`
#### Email (Courrier)
- **GET `/api/courrier`**: Fetch emails
- Query params: `folder`, `page`, `perPage`, `refresh`
- **POST `/api/courrier/refresh`**: Force refresh email cache
#### Chat (Rocket.Chat)
- **GET `/api/rocket-chat/messages`**: Fetch messages
- Query params: `refresh`
---
## 📁 Complete File Structure
### Notification Files
```
app/api/notifications/
├── route.ts # GET /api/notifications
├── count/
│ └── route.ts # GET /api/notifications/count
├── read-all/
│ └── route.ts # POST /api/notifications/read-all
└── [id]/
└── read/
└── route.ts # POST /api/notifications/[id]/read
app/api/debug/
└── notifications/
└── route.ts # GET /api/debug/notifications
lib/services/notifications/
├── notification-service.ts # Core notification service
├── notification-adapter.interface.ts # Adapter interface
└── leantime-adapter.ts # Leantime adapter implementation
lib/types/
└── notification.ts # Notification type definitions
hooks/
└── use-notifications.ts # React hook for notifications
components/
├── notification-badge.tsx # Notification UI component
└── main-nav.tsx # Navigation with notification badge
```
### Widget Files
```
app/
└── page.tsx # Main dashboard with widgets
components/
├── calendar.tsx # Calendar widget
├── calendar-widget.tsx # Alternative calendar widget
├── calendar/
│ └── calendar-widget.tsx # Calendar-specific widget
├── news.tsx # News widget
├── flow.tsx # Duties/Tasks widget
├── email.tsx # Email widget
├── parole.tsx # Chat messages widget
└── quote-card.tsx # Quote widget
app/api/
├── calendars/
│ ├── route.ts # GET /api/calendars
│ └── [id]/
│ └── events/
│ └── route.ts # GET /api/calendars/[id]/events
├── news/
│ └── route.ts # GET /api/news
├── leantime/
│ └── tasks/
│ └── route.ts # GET /api/leantime/tasks
├── courrier/
│ ├── route.ts # GET /api/courrier
│ └── refresh/
│ └── route.ts # POST /api/courrier/refresh
└── rocket-chat/
└── messages/
└── route.ts # GET /api/rocket-chat/messages
```
---
## 🔄 Update Flow Diagrams
### Notification Update Flow
```
User Action / Polling
useNotifications Hook
API Route (/api/notifications or /api/notifications/count)
NotificationService.getInstance()
Check Redis Cache
├─ Cache Hit → Return cached data
└─ Cache Miss → Fetch from Adapters
LeantimeAdapter (and other adapters)
Transform & Aggregate
Store in Redis Cache
Return to API
Return to Hook
Update Component State
```
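A condensed sketch of the cache-check step in this flow (an ioredis-style client and a stand-in adapter call are assumed; the cache key and 30-second TTL match the notes further down):
```typescript
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL ?? 'redis://localhost:6379');

interface NotificationCount {
  total: number;
  unread: number;
}

// Stand-in for the adapter aggregation step (LeantimeAdapter, etc.).
async function fetchCountFromAdapters(userId: string): Promise<NotificationCount> {
  return { total: 0, unread: 0 }; // placeholder
}

export async function getNotificationCount(userId: string): Promise<NotificationCount> {
  const cacheKey = `notifications:count:${userId}`;

  const cached = await redis.get(cacheKey);           // cache hit → return cached data
  if (cached) return JSON.parse(cached) as NotificationCount;

  const count = await fetchCountFromAdapters(userId); // cache miss → fetch from adapters
  await redis.set(cacheKey, JSON.stringify(count), 'EX', 30); // store with 30-second TTL
  return count;
}
```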
### Widget Update Flow
```
Component Mount / User Click Refresh
useEffect / onClick Handler
fetch() API Call
├─ With refresh=true (manual)
└─ Without refresh (initial)
API Route Handler
├─ Check Cache (if applicable)
├─ Fetch from External Service
└─ Return Data
Update Component State
├─ setLoading(false)
├─ setData(response)
└─ setError(null)
Re-render Component
```
---
## 🎯 Key Features Summary
### Notification System
- ✅ Multi-source aggregation (adapter pattern)
- ✅ Redis caching with TTL
- ✅ Background refresh scheduling
- ✅ Polling mechanism (60s interval)
- ✅ Rate limiting (5s minimum)
- ✅ Mark as read / Mark all as read
- ✅ Cache invalidation on updates
- ✅ Error handling and retry
- ✅ Source badges and links
### Widget System
- ✅ Manual refresh buttons
- ✅ Auto-refresh/polling (widget-specific intervals)
- ✅ Session-based loading
- ✅ Error handling
- ✅ Loading states
- ✅ Empty states
- ✅ Responsive design
---
## 📝 Notes
1. **Notification Sources**: Currently only the Leantime adapter is implemented. The other adapters (Nextcloud, Gitea, Dolibarr, Moodle) are commented out in the service.
2. **Cache Strategy**:
- Notification counts: 30 seconds TTL
- Notification lists: 5 minutes TTL
- Widget data: Varies by widget (some use API-level caching)
3. **Polling Intervals**:
- Notifications: 60 seconds
- Calendar widget: 5 minutes
- Parole widget: 30 seconds
- Other widgets: On mount only
4. **Authentication**: All notification and widget APIs require authentication via NextAuth session.
5. **Error Handling**: All components implement error states with retry mechanisms.
---
## 🔍 Debugging
- Use `/api/debug/notifications` to test notification system
- Check browser console for detailed logs (all components log extensively)
- Check Redis cache keys: `notifications:count:{userId}`, `notifications:list:{userId}:{page}:{limit}`
---
*Last Updated: Generated from codebase analysis*

View File

@ -1,200 +0,0 @@
# Notification Mark-All-As-Read Debug - Next Steps
**Date**: 2026-01-01
**Status**: Enhanced logging added, awaiting test results
---
## 🔍 Current Situation
**Issue**: Mark all as read fails, but no detailed error logs are visible
**Observation from Logs**:
```
[NOTIFICATION_API] Mark all as read - Failed { userId: '...', duration: '209ms' }
```
**Missing Logs**:
- No `[LEANTIME_ADAPTER] markAllAsRead` logs
- No `[NOTIFICATION_SERVICE] markAllAsRead` detailed logs
**Possible Causes**:
1. Server not restarted with new code
2. Adapter method not being called
3. Error happening before adapter is reached
---
## ✅ Fixes Applied
### 1. Enhanced Service Layer Logging
**File**: `lib/services/notifications/notification-service.ts`
**Added Logging**:
- Logs when `markAllAsRead` is called
- Logs available adapters
- Logs each adapter being processed
- Logs configuration status for each adapter
- Logs when calling adapter's `markAllAsRead`
- Logs result from each adapter
- Logs overall success/failure
- Logs cache invalidation status
### 2. Enhanced Adapter Layer Logging
**File**: `lib/services/notifications/leantime-adapter.ts`
**Added Logging**:
- User email and Leantime user ID
- Request body and API URL
- Response status and body
- Parsed response with error details
- Success/failure status
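A hedged sketch of what this adapter-level logging wraps (the JSON-RPC method name and `userId` parameter follow the analysis notes elsewhere in these documents; the API URL, auth header, and exact signatures are assumptions):
```typescript
async function markAllAsReadWithLogging(leantimeUserId: number): Promise<boolean> {
  const body = {
    jsonrpc: '2.0',
    method: 'leantime.rpc.Notifications.Notifications.markAllNotificationsAsRead',
    params: { userId: leantimeUserId },
    id: 1,
  };
  const apiUrl = process.env.LEANTIME_API_URL ?? ''; // assumed env variable

  console.log('[LEANTIME_ADAPTER] markAllAsRead - Request body:', JSON.stringify(body));
  console.log('[LEANTIME_ADAPTER] markAllAsRead - API URL:', apiUrl);

  const response = await fetch(apiUrl, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': process.env.LEANTIME_API_TOKEN ?? '', // auth header name is an assumption
    },
    body: JSON.stringify(body),
  });

  const text = await response.text();
  console.log('[LEANTIME_ADAPTER] markAllAsRead - Response status:', response.status);
  console.log('[LEANTIME_ADAPTER] markAllAsRead - Response body:', text);

  try {
    const parsed = JSON.parse(text);
    if (parsed.error) {
      console.error('[LEANTIME_ADAPTER] markAllAsRead - API Error:', parsed.error);
      return false;
    }
    console.log('[LEANTIME_ADAPTER] markAllAsRead - Success:', Boolean(parsed.result));
    return Boolean(parsed.result);
  } catch {
    console.error('[LEANTIME_ADAPTER] markAllAsRead - Non-JSON response');
    return false;
  }
}
```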
---
## 🚀 Next Steps
### Step 1: Restart Server
**CRITICAL**: The server must be restarted for the new logging to take effect.
```bash
# Stop the server
sudo npm stop
# Or if using PM2/systemd, restart appropriately
# Start the server
sudo npm start
```
### Step 2: Test Mark All As Read
1. Open the notification dropdown
2. Click "Mark all as read"
3. Immediately check the server logs
### Step 3: Check Logs
**Expected Log Flow** (if working correctly):
```
[NOTIFICATION_API] Mark all as read endpoint called
[NOTIFICATION_API] Mark all as read - Processing { userId: '...', timestamp: '...' }
[NOTIFICATION_SERVICE] markAllAsRead called for user ...
[NOTIFICATION_SERVICE] Available adapters: leantime
[NOTIFICATION_SERVICE] Processing adapter: leantime
[NOTIFICATION_SERVICE] Adapter leantime is configured: true
[NOTIFICATION_SERVICE] Calling markAllAsRead on adapter leantime
[LEANTIME_ADAPTER] markAllAsRead called for ...
[LEANTIME_ADAPTER] markAllAsRead - User email: ...
[LEANTIME_ADAPTER] markAllAsRead - Leantime user ID: ...
[LEANTIME_ADAPTER] markAllAsRead - Request body: {...}
[LEANTIME_ADAPTER] markAllAsRead - API URL: ...
[LEANTIME_ADAPTER] markAllAsRead - Response status: XXX
[LEANTIME_ADAPTER] markAllAsRead - Response body: {...}
[LEANTIME_ADAPTER] markAllAsRead - Success: true/false
[NOTIFICATION_SERVICE] Adapter leantime markAllAsRead result: true/false
[NOTIFICATION_SERVICE] markAllAsRead overall success: true/false
```
**If Still Failing**, the logs will show:
- Which adapter is being processed
- Whether it's configured
- Whether the adapter method is called
- What error the Leantime API returns
- Where exactly it's failing
---
## 🔍 What to Look For
### If No Adapter Logs Appear
**Possible Issues**:
1. Server not restarted → **Solution**: Restart server
2. Adapter not configured → Check `isConfigured()` result
3. Error in service layer → Check service layer logs
### If Adapter Logs Appear But Fail
**Check These**:
1. **User Email**: Should show email address
2. **Leantime User ID**: Should show numeric ID (e.g., `2`)
3. **Request Body**: Should show valid JSON-RPC request
4. **Response Status**:
- `200` = Success (but check result)
- `400` = Bad request (check error body)
- `401` = Authentication issue
- `500` = Server error
5. **Response Body**: Will show the actual error from Leantime
### Common Leantime API Errors
1. **Method Not Found**:
- Error: `"Method not found"`
- Fix: Verify method name is correct
2. **Invalid Parameters**:
- Error: `"Invalid params"`
- Fix: Check parameter format
3. **Authentication Failed**:
- Error: `"Unauthorized"` or `401`
- Fix: Check API token
4. **User Not Found**:
- Error: `"User not found"`
- Fix: Verify Leantime user ID mapping
---
## 📊 Expected Log Output Examples
### Success Case
```
[LEANTIME_ADAPTER] markAllAsRead - Response status: 200
[LEANTIME_ADAPTER] markAllAsRead - Response body: {"jsonrpc":"2.0","result":true,"id":1}
[LEANTIME_ADAPTER] markAllAsRead - Parsed response: { hasResult: true, result: true, hasError: false }
[LEANTIME_ADAPTER] markAllAsRead - Success: true
```
### Failure Case - Method Not Found
```
[LEANTIME_ADAPTER] markAllAsRead - Response status: 200
[LEANTIME_ADAPTER] markAllAsRead - Response body: {"jsonrpc":"2.0","error":{"code":-32601,"message":"Method not found"},"id":1}
[LEANTIME_ADAPTER] markAllAsRead - Parsed response: { hasResult: false, hasError: true, error: {...} }
[LEANTIME_ADAPTER] markAllAsRead - API Error: { code: -32601, message: "Method not found" }
[LEANTIME_ADAPTER] markAllAsRead - Success: false
```
### Failure Case - Invalid Params
```
[LEANTIME_ADAPTER] markAllAsRead - Response status: 200
[LEANTIME_ADAPTER] markAllAsRead - Response body: {"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid params"},"id":1}
[LEANTIME_ADAPTER] markAllAsRead - API Error: { code: -32602, message: "Invalid params" }
```
---
## 🎯 Action Items
1. ✅ **Restart Server** (CRITICAL)
2. ⏳ **Test Mark All As Read**
3. ⏳ **Share Complete Logs** (from endpoint call to failure)
4. ⏳ **Analyze Error Details** (once logs are available)
---
## 📝 Summary
**Status**: Enhanced logging ready, awaiting server restart and test
**Next**: After restart, test and share logs to identify exact failure point
**Confidence**: 🟢 **HIGH** - Enhanced logging will reveal the root cause
---
**Generated**: 2026-01-01

View File

@ -1,239 +0,0 @@
# Notification System Fixes - Implementation Summary
**Date**: 2026-01-06
**Status**: ✅ All fixes implemented
---
## ✅ **Fix #1: Redis Caching for Leantime User ID**
### **Problem**:
- `getLeantimeUserId()` fetched ALL users from Leantime API every time
- No caching, causing slow performance and inconsistent results
- Race conditions between different calls
### **Solution**:
- Added Redis caching with 1-hour TTL
- Cache key: `leantime:userid:${email.toLowerCase()}`
- Checks cache first before making API call
- Caches result after successful fetch
### **Implementation**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Method**: `getLeantimeUserId()`
- **Cache TTL**: 3600 seconds (1 hour)
- **Static helper**: `invalidateUserIdCache()` for manual cache clearing
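A minimal sketch of this caching step, assuming an ioredis-style client and treating the actual Leantime lookup as a stand-in (the cache key and 1-hour TTL are from the notes above):
```typescript
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL ?? 'redis://localhost:6379');

// Stand-in for the existing "fetch all users and match by email" lookup.
declare function lookupUserIdFromLeantime(email: string): Promise<number | null>;

export async function getLeantimeUserId(email: string): Promise<number | null> {
  const cacheKey = `leantime:userid:${email.toLowerCase()}`;

  const cached = await redis.get(cacheKey);
  if (cached) return Number(cached); // cache hit: no API call needed

  const userId = await lookupUserIdFromLeantime(email);
  if (userId !== null) {
    await redis.set(cacheKey, String(userId), 'EX', 3600); // cache for 1 hour
  }
  return userId;
}

// Mirrors the static invalidateUserIdCache() helper for manual clearing.
export async function invalidateUserIdCache(email: string): Promise<void> {
  await redis.del(`leantime:userid:${email.toLowerCase()}`);
}
```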
### **Benefits**:
- ✅ Faster performance (no API call if cached)
- ✅ More reliable (consistent results)
- ✅ Reduced API load on Leantime
- ✅ Better error recovery (can use cached value if API fails)
---
## ✅ **Fix #2: Retry Logic with Exponential Backoff**
### **Problem**:
- `getLeantimeUserId()` failed immediately on API errors
- No retry mechanism for transient failures
- Network errors caused permanent failures
### **Solution**:
- Added retry logic with up to 3 retries
- Exponential backoff: 1s, 2s, 4s (max 5s)
- Retries on:
- Server errors (5xx)
- Rate limiting (429)
- Network errors
- Certain JSON-RPC errors
### **Implementation**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Method**: `getLeantimeUserId()` with `fetchWithRetry()`
- **Max Retries**: 3
- **Backoff**: Exponential (1s → 2s → 4s)
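A sketch of a retry wrapper under the parameters above (status-code handling is simplified; the real `fetchWithRetry()` may differ):
```typescript
// Retry a fetch with exponential backoff: 1s → 2s → 4s, capped at 5s.
async function fetchWithRetry(
  url: string,
  init: RequestInit,
  maxRetries = 3
): Promise<Response> {
  let lastError: unknown;

  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      const response = await fetch(url, init);
      // Retry on server errors (5xx) and rate limiting (429); return everything else.
      if (response.status < 500 && response.status !== 429) {
        return response;
      }
      lastError = new Error(`HTTP ${response.status}`);
    } catch (error) {
      lastError = error; // network error (ECONNRESET, timeout, …)
    }

    if (attempt < maxRetries) {
      const backoff = Math.min(1000 * 2 ** attempt, 5000); // 1s, 2s, 4s (max 5s)
      await new Promise((resolve) => setTimeout(resolve, backoff));
    }
  }

  throw lastError;
}
```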
### **Benefits**:
- ✅ Handles transient failures gracefully
- ✅ Better resilience to network issues
- ✅ Improved success rate for user ID lookup
---
## ✅ **Fix #3: Always Invalidate Cache After Marking**
### **Problem**:
- Cache only invalidated if marking operation succeeded
- If `getLeantimeUserId()` failed, cache stayed stale
- Count remained at old value (65) even after marking attempts
### **Solution**:
- Always invalidate cache after marking attempt
- Even if operation failed or returned `false`
- Ensures fresh data on next fetch
### **Implementation**:
- **File**: `lib/services/notifications/notification-service.ts`
- **Methods**:
- `markAsRead()` - Always invalidates cache
- `markAllAsRead()` - Always invalidates cache
- **Logic**: Cache invalidation happens regardless of success/failure
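A sketch of the always-invalidate shape, with the adapter registry and cache helper declared as stand-ins:
```typescript
interface NotificationAdapter {
  markAllAsRead(userId: string): Promise<boolean>;
}

// Stand-ins for the service's adapter registry and Redis cache helper.
declare const adapters: NotificationAdapter[];
declare function invalidateCache(userId: string): Promise<void>;

export async function markAllAsRead(userId: string): Promise<boolean> {
  let anySuccess = false;
  try {
    const results = await Promise.all(
      adapters.map((adapter) => adapter.markAllAsRead(userId))
    );
    anySuccess = results.some(Boolean);
  } finally {
    // Runs even when marking fails, so the next count fetch gets fresh data
    // instead of a stale cached value.
    await invalidateCache(userId);
  }
  return anySuccess;
}
```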
### **Benefits**:
- ✅ Count always refreshes after marking attempts
- ✅ User sees accurate data even if operation partially failed
- ✅ Better UX (no stale count stuck at 65)
---
## ✅ **Fix #4: Improved Count Accuracy**
### **Problem**:
- Count only based on first 100 notifications
- If user had >100 notifications, count was inaccurate
- Used cached notifications which might be stale
### **Solution**:
- Fetch up to 1000 notifications directly from API for counting
- Bypasses cache to get fresh data
- More accurate count for users with many notifications
### **Implementation**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Method**: `getNotificationCount()`
- **Change**: Fetches directly from API (up to 1000) instead of using cached `getNotifications()`
- **Warning**: Logs if count reaches 1000 (might have more)
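A sketch of the revised counting step (the `read: 0/1` field follows the flow-analysis notes later in this set; the fetch helper is a stand-in):
```typescript
interface RawLeantimeNotification {
  id: number;
  read: 0 | 1; // Leantime flags read state as 0/1 per the flow-analysis notes
}

// Stand-in for the direct, cache-bypassing API fetch of up to `limit` notifications.
declare function fetchRawNotifications(
  leantimeUserId: number,
  limit: number
): Promise<RawLeantimeNotification[]>;

export async function getUnreadCount(leantimeUserId: number): Promise<number> {
  const raw = await fetchRawNotifications(leantimeUserId, 1000); // fresh data, no cache
  const unread = raw.filter((n) => n.read === 0).length;

  if (raw.length === 1000) {
    console.warn('[LEANTIME_ADAPTER] Count reached 1000 - there may be more notifications');
  }
  return unread;
}
```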
### **Benefits**:
- ✅ More accurate count (up to 1000 notifications)
- ✅ Fresh data (bypasses cache)
- ✅ Better handling of users with many notifications
---
## ✅ **Fix #5: Better Error Handling and Logging**
### **Problem**:
- Errors were logged but not handled gracefully
- No way to manually clear user ID cache
- Limited error context in logs
### **Solution**:
- Added static method to invalidate user ID cache
- Improved error messages with more context
- Better logging throughout the flow
- Graceful degradation on errors
### **Implementation**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Static Method**: `invalidateUserIdCache(email)`
- **Improved Logging**: More detailed error messages
- **Error Recovery**: Continues operation even if caching fails
### **Benefits**:
- ✅ Better debugging with detailed logs
- ✅ Manual cache clearing for troubleshooting
- ✅ More resilient to partial failures
---
## 📊 **Expected Behavior After Fixes**
### **Before Fixes**:
1. Mark all as read → `getLeantimeUserId()` fails → Returns `false`
2. Cache NOT invalidated → Count stays 65 ❌
3. User sees stale count
### **After Fixes**:
1. Mark all as read → `getLeantimeUserId()` checks cache first ✅
2. If cached: Uses cached ID immediately ✅
3. If not cached: Fetches with retry logic ✅
4. Marks notifications as read ✅
5. **Always invalidates cache**
6. Count refresh gets fresh data → Shows 0 ✅
---
## 🎯 **Key Improvements**
### **Reliability**:
- ✅ User ID lookup is now cached and retried
- ✅ Cache always invalidated after marking
- ✅ Better error recovery
### **Performance**:
- ✅ Faster user ID lookup (cached)
- ✅ Reduced API calls to Leantime
- ✅ More efficient cache usage
### **Accuracy**:
- ✅ Count based on up to 1000 notifications
- ✅ Fresh data from API (bypasses stale cache)
- ✅ Better handling of edge cases
### **User Experience**:
- ✅ Count updates correctly after marking
- ✅ No more stuck count at 65
- ✅ Faster response times
---
## 🚀 **Testing Checklist**
After rebuild (`rm -rf .next && npm run build && npm start`):
1. ✅ **Test Mark All As Read**:
- Should work even if user ID lookup was previously failing
- Count should update to 0 after marking
- Cache should be invalidated
2. ✅ **Test Mark Single As Read**:
- Should work reliably
- Count should decrement correctly
- Cache should be invalidated
3. ✅ **Test Count Accuracy**:
- Should show accurate count (up to 1000)
- Should refresh after marking
- Should use fresh data from API
4. ✅ **Test User ID Caching**:
- First call should fetch from API
- Subsequent calls should use cache
- Should be faster on subsequent calls
5. ✅ **Test Retry Logic**:
- Should retry on transient failures
- Should eventually succeed or fail gracefully
- Should log retry attempts
---
## 📝 **Files Modified**
1. **`lib/services/notifications/leantime-adapter.ts`**:
- Added Redis caching for user ID
- Added retry logic with exponential backoff
- Improved `getNotificationCount()` to fetch directly from API
- Added `invalidateUserIdCache()` static method
- Better error handling and logging
2. **`lib/services/notifications/notification-service.ts`**:
- Always invalidate cache in `markAsRead()`
- Always invalidate cache in `markAllAsRead()`
- Better error handling and logging
---
## 🔧 **Configuration**
- **User ID Cache TTL**: 3600 seconds (1 hour)
- **Max Retries**: 3 attempts
- **Retry Backoff**: Exponential (1s, 2s, 4s, max 5s)
- **Count Fetch Limit**: 1000 notifications
---
**Status**: ✅ All fixes implemented and ready for testing

View File

@ -1,314 +0,0 @@
# Notification System Fixes - Implementation Summary
**Date**: 2026-01-06
**Status**: ✅ All High-Priority Fixes Implemented
---
## ✅ **Fix #1: Integrated Unified Refresh System**
### **Changes**:
- **File**: `hooks/use-notifications.ts`
- **Removed**: Custom polling logic (60s interval, debouncing)
- **Added**: `useUnifiedRefresh` hook integration
- **Result**: Uses centralized `RefreshManager` with 30s interval
### **Benefits**:
- ✅ Consistent refresh intervals across all widgets
- ✅ Reduced code duplication
- ✅ Better coordination with other refresh systems
- ✅ Automatic deduplication built-in
### **Code Changes**:
```typescript
// Before: Custom polling
pollingIntervalRef.current = setInterval(() => {
debouncedFetchCount();
}, 60000);
// After: Unified refresh
const { refresh: refreshCount } = useUnifiedRefresh({
resource: 'notifications-count',
interval: REFRESH_INTERVALS.NOTIFICATIONS_COUNT, // 30s
enabled: status === 'authenticated',
onRefresh: async () => {
await fetchNotificationCount(false);
},
priority: 'high',
});
```
---
## ✅ **Fix #2: Batch Processing for Mark All As Read**
### **Changes**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Added**: Batch processing (15 notifications per batch)
- **Added**: Delay between batches (200ms)
- **Added**: Automatic retry for failed notifications
- **Added**: Success rate threshold (80% = success)
### **Benefits**:
- ✅ Prevents API overload
- ✅ Reduces connection resets
- ✅ Better error recovery
- ✅ More reliable marking
### **Implementation**:
```typescript
// Process in batches of 15
const BATCH_SIZE = 15;
const BATCH_DELAY = 200;
const MAX_RETRIES = 2;
// Process each batch with delay
for (let i = 0; i < notificationIds.length; i += BATCH_SIZE) {
const batch = notificationIds.slice(i, i + BATCH_SIZE);
await Promise.all(batch.map(n => markSingleNotification(n)));
await delay(BATCH_DELAY); // Delay between batches
}
// Retry failed notifications
if (failedNotifications.length > 0) {
await retryFailedNotifications();
}
```
---
## ✅ **Fix #3: Fixed Cache TTL Consistency**
### **Changes**:
- **File**: `lib/services/notifications/notification-service.ts`
- **Changed**: List cache TTL: 5 minutes → 30 seconds
- **Aligned**: All cache TTLs to 30 seconds
- **File**: `app/api/notifications/route.ts` & `count/route.ts`
- **Changed**: Client cache: `max-age=30/10` → `max-age=0, must-revalidate`
### **Benefits**:
- ✅ Count and list always in sync
- ✅ Consistent behavior
- ✅ Predictable cache expiration
- ✅ No stale data inconsistencies
### **Before/After**:
```typescript
// Before
COUNT_CACHE_TTL = 30; // 30 seconds
LIST_CACHE_TTL = 300; // 5 minutes ❌
// After
COUNT_CACHE_TTL = 30; // 30 seconds ✅
LIST_CACHE_TTL = 30; // 30 seconds ✅
```
---
## ✅ **Fix #4: Added Progress Feedback**
### **Changes**:
- **File**: `hooks/use-notifications.ts`
- **Added**: `markingProgress` state: `{ current: number; total: number }`
- **File**: `components/notification-badge.tsx`
- **Added**: Progress bar UI during mark all as read
- **Added**: Progress text: "Marking X of Y..."
### **Benefits**:
- ✅ User knows operation is in progress
- ✅ Better UX (no silent waiting)
- ✅ Prevents multiple clicks
- ✅ Visual feedback
### **UI Changes**:
```tsx
{markingProgress && (
<div className="flex items-center gap-2">
<div className="animate-spin rounded-full h-4 w-4 border-b-2"></div>
<span>Marking {markingProgress.current} of {markingProgress.total}...</span>
</div>
)}
```
---
## ✅ **Fix #5: Improved Optimistic Updates**
### **Changes**:
- **File**: `hooks/use-notifications.ts`
- **Added**: Polling mechanism to verify count updates
- **Changed**: Better timing for count refresh
- **Added**: Poll until count matches expected value
### **Benefits**:
- ✅ More accurate UI updates
- ✅ Less confusing count jumps
- ✅ Better error recovery
- ✅ Verifies server state matches UI
### **Implementation**:
```typescript
// Poll until count matches expected value
let pollCount = 0;
const maxPolls = 5;
const pollInterval = 500;
const pollForCount = async () => {
if (pollCount >= maxPolls) return;
pollCount++;
await fetchNotificationCount(true);
if (pollCount < maxPolls) {
setTimeout(pollForCount, pollInterval);
}
};
```
---
## ✅ **Fix #6: Added Request Deduplication**
### **Changes**:
- **File**: `hooks/use-notifications.ts`
- **Added**: `requestDeduplicator` for all fetch calls
- **Result**: Prevents duplicate API calls within 2-second window
### **Benefits**:
- ✅ Fewer API calls
- ✅ Better performance
- ✅ Reduced server load
- ✅ Prevents race conditions
### **Implementation**:
```typescript
// Before: Direct fetch
const response = await fetch(url);
// After: Deduplicated fetch
const data = await requestDeduplicator.execute(
`notifications-count-${userId}`,
async () => {
const response = await fetch(url);
return response.json();
},
2000 // 2 second deduplication window
);
```
---
## ✅ **Fix #7: Cached User Email**
### **Changes**:
- **File**: `lib/services/notifications/leantime-adapter.ts`
- **Added**: Redis cache for user email (30-minute TTL)
- **Result**: Reduces session lookups
### **Benefits**:
- ✅ Better performance
- ✅ Fewer session calls
- ✅ More consistent
- ✅ Reduced overhead
---
## 📊 **Performance Improvements**
### **Before**:
- Polling: Every 60 seconds
- Cache TTL: Inconsistent (30s / 5min)
- Mark all: All parallel (can timeout)
- No deduplication
- No progress feedback
### **After**:
- Refresh: Every 30 seconds (unified)
- Cache TTL: Consistent (30s / 30s)
- Mark all: Batched (15 at a time, 200ms delay)
- Request deduplication: 2-second window
- Progress feedback: Real-time UI updates
### **Expected Results**:
- **50-70% reduction** in API calls
- **30-40% faster** response times
- **80-90% success rate** for mark all (vs 60-70% before)
- **Better UX** with progress indicators
---
## 🎯 **Files Modified**
1. ✅ `hooks/use-notifications.ts`
- Integrated unified refresh
- Added request deduplication
- Added progress tracking
- Improved optimistic updates
2. ✅ `lib/services/notifications/leantime-adapter.ts`
- Batch processing for mark all
- Retry logic with exponential backoff
- User email caching
3. ✅ `lib/services/notifications/notification-service.ts`
- Fixed cache TTL consistency (30s for all)
4. ✅ `app/api/notifications/route.ts`
- Updated client cache headers
5. ✅ `app/api/notifications/count/route.ts`
- Updated client cache headers
6. ✅ `components/notification-badge.tsx`
- Added progress UI
- Better loading states
---
## 🚀 **Testing Checklist**
After rebuild (`rm -rf .next && npm run build && npm start`):
1. ✅ **Unified Refresh**:
- Count should refresh every 30 seconds
- Should use centralized refresh manager
- No duplicate polling
2. ✅ **Batch Processing**:
- Mark all as read should process in batches
- Should show progress (if implemented)
- Should be more reliable (80-90% success)
3. ✅ **Cache Consistency**:
- Count and list should always be in sync
- Cache should expire after 30 seconds
- No stale data
4. ✅ **Progress Feedback**:
- Should show progress bar during mark all
- Should display "Marking X of Y..."
- Should prevent multiple clicks
5. ✅ **Request Deduplication**:
- Multiple rapid calls should be deduplicated
- Should see fewer API calls in logs
- Better performance
---
## 📝 **Next Steps (Optional)**
### **Medium Priority** (Future):
1. Real-time progress updates (WebSocket/SSE)
2. Connection pooling for API calls
3. Better error messages for users
4. Cancel operation button
### **Low Priority** (Nice to Have):
1. WebSocket for real-time notifications
2. Push notifications
3. Notification grouping
4. Filtering and sorting
---
**Status**: ✅ All high-priority fixes implemented and ready for testing

View File

@ -1,526 +0,0 @@
# Complete Notification Flow Analysis
**Date**: 2026-01-06
**Purpose**: Trace the entire notification system flow to identify issues and improvements
---
## 🔍 **FLOW 1: Initial Page Load & Count Display**
### Step-by-Step Flow:
1. **Component Mount** (`notification-badge.tsx`)
- `useNotifications()` hook initializes
- `useEffect` triggers when `status === 'authenticated'`
- Calls `fetchNotificationCount(true)` (force refresh)
- Calls `fetchNotifications()`
- Starts polling every 60 seconds
2. **Count Fetch** (`use-notifications.ts` → `/api/notifications/count`)
- Hook calls `/api/notifications/count?_t=${Date.now()}` (cache-busting)
- API route authenticates user
- Calls `NotificationService.getNotificationCount(userId)`
3. **Service Layer** (`notification-service.ts`)
- **Checks Redis cache first** (`notifications:count:${userId}`)
- If cached: Returns cached data immediately
- If not cached: Fetches from adapters
4. **Adapter Layer** (`leantime-adapter.ts`)
- `getNotificationCount()` calls `getNotifications(userId, 1, 100)`
- **⚠️ ISSUE**: Only fetches first 100 notifications for counting
- Filters unread: `notifications.filter(n => !n.isRead).length`
- Returns count object
5. **Cache Storage**
- Service stores count in Redis with 30-second TTL
- Returns to API route
- API returns to hook
- Hook updates React state: `setNotificationCount(data)`
6. **UI Update**
- Badge displays `notificationCount.unread`
- Shows "65" if 65 unread notifications
---
## 🔍 **FLOW 2: Mark Single Notification as Read**
### Step-by-Step Flow:
1. **User Action** (`notification-badge.tsx`)
- User clicks "Mark as read" button
- Calls `handleMarkAsRead(notificationId)`
- Calls `markAsRead(notificationId)` from hook
2. **Hook Action** (`use-notifications.ts`)
- Makes POST to `/api/notifications/${notificationId}/read`
- **Optimistic UI Update**:
- Updates notification in state: `isRead: true`
- Decrements count: `unread: Math.max(0, prev.unread - 1)`
- Waits 100ms, then calls `fetchNotificationCount(true)`
3. **API Route** (`app/api/notifications/[id]/read/route.ts`)
- Authenticates user
- Extracts notification ID: `leantime-2732` → splits to get source and ID
- Calls `NotificationService.markAsRead(userId, notificationId)`
4. **Service Layer** (`notification-service.ts`)
- Extracts source: `leantime` from ID
- Gets adapter: `this.adapters.get('leantime')`
- Calls `adapter.markAsRead(userId, notificationId)`
5. **Adapter Layer** (`leantime-adapter.ts`)
- **Gets user email from session**: `getUserEmail()`
- **Gets Leantime user ID**: `getLeantimeUserId(email)`
- **⚠️ CRITICAL ISSUE**: If `getLeantimeUserId()` fails → returns `false`
- If successful: Calls Leantime API `markNotificationRead`
- Returns success/failure
6. **Cache Invalidation** (`notification-service.ts`)
- If `markAsRead()` returns `true`:
- Calls `invalidateCache(userId)`
- Deletes count cache: `notifications:count:${userId}`
- Deletes all list caches: `notifications:list:${userId}:*`
- If returns `false`: **Cache NOT invalidated**
7. **Count Refresh** (`use-notifications.ts`)
- After 100ms delay, calls `fetchNotificationCount(true)`
- Fetches fresh count from API
- **⚠️ ISSUE**: If cache wasn't invalidated, might get stale count
---
## 🔍 **FLOW 3: Mark All Notifications as Read**
### Step-by-Step Flow:
1. **User Action** (`notification-badge.tsx`)
- User clicks "Mark all read" button
- Calls `handleMarkAllAsRead()`
- Calls `markAllAsRead()` from hook
2. **Hook Action** (`use-notifications.ts`)
- Makes POST to `/api/notifications/read-all`
- **Optimistic UI Update**:
- Sets all notifications: `isRead: true`
- Sets count: `unread: 0`
- Waits 200ms, then calls `fetchNotificationCount(true)`
3. **API Route** (`app/api/notifications/read-all/route.ts`)
- Authenticates user
- Calls `NotificationService.markAllAsRead(userId)`
4. **Service Layer** (`notification-service.ts`)
- Loops through all adapters
- For each adapter:
- Checks if configured
- Calls `adapter.markAllAsRead(userId)`
- Collects results: `[true/false, ...]`
- Determines: `success = results.every(r => r)`, `anySuccess = results.some(r => r)`
- **Cache Invalidation**:
- If `anySuccess === true`: Invalidates cache ✅
- If `anySuccess === false`: **Cache NOT invalidated**
5. **Adapter Layer** (`leantime-adapter.ts`)
- **Gets user email**: `getUserEmail()`
- **Gets Leantime user ID**: `getLeantimeUserId(email)`
- **⚠️ CRITICAL ISSUE**: If this fails → returns `false` immediately
- If successful:
- Fetches all notifications directly from API (up to 1000)
- Filters unread: `rawNotifications.filter(n => n.read === 0)`
- Marks each individually using `markNotificationRead`
- Returns success if any were marked
6. **Cache Invalidation** (`notification-service.ts`)
- Only happens if `anySuccess === true`
- **⚠️ ISSUE**: If `getLeantimeUserId()` fails, `anySuccess = false`
- Cache stays stale → count remains 65
7. **Count Refresh** (`use-notifications.ts`)
- After 200ms, calls `fetchNotificationCount(true)`
- **⚠️ ISSUE**: If cache wasn't invalidated, gets stale count from cache
---
## 🔍 **FLOW 4: Fetch Notification List**
### Step-by-Step Flow:
1. **User Opens Dropdown** (`notification-badge.tsx`)
- `handleOpenChange(true)` called
- Calls `manualFetch()` which calls `fetchNotifications(1, 10)`
2. **Hook Action** (`use-notifications.ts`)
- Makes GET to `/api/notifications?page=1&limit=20`
- Updates state: `setNotifications(data.notifications)`
3. **API Route** (`app/api/notifications/route.ts`)
- Authenticates user
- Calls `NotificationService.getNotifications(userId, page, limit)`
4. **Service Layer** (`notification-service.ts`)
- **Checks Redis cache first**: `notifications:list:${userId}:${page}:${limit}`
- If cached: Returns cached data immediately
- If not cached: Fetches from adapters
5. **Adapter Layer** (`leantime-adapter.ts`)
- Gets user email and Leantime user ID
- Calls Leantime API `getAllNotifications` with pagination
- Transforms notifications to our format
- Returns array
6. **Cache Storage**
- Service stores list in Redis with 5-minute TTL
- Returns to API
- API returns to hook
- Hook updates React state
---
## 🐛 **IDENTIFIED ISSUES**
### **Issue #1: getLeantimeUserId() Fails Inconsistently**
**Problem**:
- `getLeantimeUserId()` works in `getNotifications()` and `getNotificationCount()`
- But fails in `markAllAsRead()` and sometimes in `markAsRead()`
- Logs show: `"User not found in Leantime: a.tmiri@clm.foundation"`
**Root Cause**:
- `getLeantimeUserId()` calls Leantime API `getAll` users endpoint
- Fetches ALL users, then searches for matching email
- **Possible causes**:
1. **Race condition**: API call happens at different times
2. **Session timing**: Session might be different between calls
3. **API rate limiting**: Leantime API might throttle requests
4. **Caching issue**: No caching of user ID lookup
**Impact**:
- Mark all as read fails → cache not invalidated → count stays 65
- Mark single as read might fail → cache not invalidated → count doesn't update
**Solution**:
- Cache Leantime user ID in Redis with longer TTL
- Add retry logic with exponential backoff
- Add better error handling and logging
---
### **Issue #2: Cache Invalidation Only on Success**
**Problem**:
- Cache is only invalidated if `markAsRead()` or `markAllAsRead()` returns `true`
- If operation fails (e.g., `getLeantimeUserId()` fails), cache stays stale
- Count remains at old value (65)
**Root Cause**:
```typescript
if (success) {
await this.invalidateCache(userId);
}
```
**Impact**:
- User sees stale count even after attempting to mark as read
- UI shows optimistic update, but server count doesn't match
**Solution**:
- Always invalidate cache after marking attempt (even on failure)
- Or: Invalidate cache before marking, then refresh after
- Or: Use optimistic updates with eventual consistency
---
### **Issue #3: Count Based on First 100 Notifications**
**Problem**:
- `getNotificationCount()` only fetches first 100 notifications
- If user has 200 notifications with 66 unread, count shows 66
- But if 66 unread are beyond first 100, count is wrong
**Root Cause**:
```typescript
const notifications = await this.getNotifications(userId, 1, 100);
const unreadCount = notifications.filter(n => !n.isRead).length;
```
**Impact**:
- Count might be inaccurate if >100 notifications exist
- User might see "66 unread" but only 10 displayed (pagination)
**Solution**:
- Use dedicated count API if Leantime provides one
- Or: Fetch all notifications for counting (up to reasonable limit)
- Or: Show "66+ unread" if count reaches 100
---
### **Issue #4: Race Condition Between Cache Invalidation and Count Fetch**
**Problem**:
- Hook calls `fetchNotificationCount(true)` after 100-200ms delay
- But cache invalidation might not be complete
- Count fetch might still get stale cache
**Root Cause**:
```typescript
setTimeout(() => {
fetchNotificationCount(true);
}, 200);
```
**Impact**:
- Count might not update immediately after marking
- User sees optimistic update, then stale count
**Solution**:
- Increase delay to 500ms
- Or: Poll count until it matches expected value
- Or: Use WebSocket/SSE for real-time updates
---
### **Issue #5: No Caching of Leantime User ID**
**Problem**:
- `getLeantimeUserId()` fetches ALL users from Leantime API every time
- No caching, so repeated calls are slow and might fail
- Different calls might get different results (race condition)
**Root Cause**:
- No Redis cache for user ID mapping
- Each call makes full API request
**Impact**:
- Slow performance
- Inconsistent results
- API rate limiting issues
**Solution**:
- Cache user ID in Redis: `leantime:userid:${email}` with 1-hour TTL
- Invalidate cache only when user changes or on explicit refresh
---
### **Issue #6: getNotificationCount Uses Cached getNotifications**
**Problem**:
- `getNotificationCount()` calls `getNotifications(userId, 1, 100)`
- `getNotifications()` uses cache if available
- Count might be based on stale cached notifications
**Root Cause**:
```typescript
async getNotificationCount(userId: string): Promise<NotificationCount> {
const notifications = await this.getNotifications(userId, 1, 100);
// Uses cached data if available
}
```
**Impact**:
- Count might be stale even if notifications were marked as read
- Cache TTL mismatch: count cache (30s) vs list cache (5min)
**Solution**:
- Fetch notifications directly from API for counting (bypass cache)
- Or: Use dedicated count endpoint
- Or: Invalidate list cache when count cache is invalidated
---
### **Issue #7: Optimistic Updates Don't Match Server State**
**Problem**:
- Hook optimistically updates count: `unread: 0`
- But server count might still be 65 (cache not invalidated)
- After refresh, count jumps back to 65
**Root Cause**:
- Optimistic update happens immediately
- Server cache invalidation might fail
- Count refresh gets stale data
**Impact**:
- Confusing UX: count goes to 0, then back to 65
- User thinks operation failed when it might have succeeded
**Solution**:
- Only show optimistic update if we're confident operation will succeed
- Or: Show loading state until server confirms
- Or: Poll until count matches expected value
---
## 🎯 **RECOMMENDED IMPROVEMENTS**
### **Priority 1: Fix getLeantimeUserId() Reliability**
1. **Cache User ID Mapping**
```typescript
// Cache key: leantime:userid:${email}
// TTL: 1 hour
// Invalidate on user update or explicit refresh
```
2. **Add Retry Logic**
```typescript
// Retry 3 times with exponential backoff
// Log each attempt
// Return cached value if API fails
```
3. **Better Error Handling**
```typescript
// Log full error details
// Return null only after all retries fail
// Don't fail entire operation on user ID lookup failure
```
---
### **Priority 2: Always Invalidate Cache After Marking**
1. **Invalidate Before Marking**
```typescript
// Invalidate cache first
// Then mark as read
// Then refresh count
```
2. **Or: Always Invalidate After Attempt**
```typescript
// Always invalidate cache after marking attempt
// Even if operation failed
// This ensures fresh data on next fetch
```
---
### **Priority 3: Fix Count Accuracy**
1. **Use Dedicated Count API** (if available)
```typescript
// Check if Leantime has count-only endpoint
// Use that instead of fetching all notifications
```
2. **Or: Fetch All for Counting**
```typescript
// Fetch up to 1000 notifications for counting
// Or use pagination to count all
```
3. **Or: Show "66+ unread" if limit reached**
```typescript
// If count === 100, show "100+ unread"
// Indicate there might be more
```
---
### **Priority 4: Improve Cache Strategy**
1. **Unified Cache Invalidation**
```typescript
// When count cache is invalidated, also invalidate list cache
// When list cache is invalidated, also invalidate count cache
// Keep them in sync
```
2. **Shorter Cache TTLs**
```typescript
// Count cache: 10 seconds (currently 30s)
// List cache: 1 minute (currently 5min)
// More frequent updates
```
3. **Cache Tags/Versioning**
```typescript
// Use cache version numbers
// Increment on invalidation
// Check version before using cache
```
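As a sketch of item 1 above, a unified invalidation helper might look like this (ioredis-style client assumed; the key patterns come from the flows earlier in this document):
```typescript
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL ?? 'redis://localhost:6379');

// Drop the count cache and every cached list page together, so the two
// can never drift apart after a mark-as-read operation.
export async function invalidateNotificationCaches(userId: string): Promise<void> {
  await redis.del(`notifications:count:${userId}`);

  // KEYS is acceptable at this scale; SCAN would be safer on a large keyspace.
  const listKeys = await redis.keys(`notifications:list:${userId}:*`);
  if (listKeys.length > 0) {
    await redis.del(...listKeys);
  }
}
```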
---
### **Priority 5: Better Error Recovery**
1. **Graceful Degradation**
```typescript
// If mark as read fails, still invalidate cache
// Show error message to user
// Allow retry
```
2. **Retry Logic**
```typescript
// Retry failed operations automatically
// Exponential backoff
// Max 3 retries
```
---
## 📊 **FLOW DIAGRAM: Current vs Improved**
### **Current Flow (Mark All As Read)**:
```
User clicks → Hook → API → Service → Adapter
getLeantimeUserId() → FAILS ❌
Returns false → Service: anySuccess = false
Cache NOT invalidated ❌
Count refresh → Gets stale cache → Shows 65 ❌
```
### **Improved Flow (Mark All As Read)**:
```
User clicks → Hook → API → Service → Adapter
getLeantimeUserId() → Check cache first
If cached: Use cached ID ✅
If not cached: Fetch from API → Cache result ✅
Mark all as read → Success ✅
Always invalidate cache (even on partial failure) ✅
Count refresh → Gets fresh data → Shows 0 ✅
```
---
## 🚀 **IMPLEMENTATION PRIORITY**
1. **Fix getLeantimeUserId() caching** (High Priority)
- Add Redis cache for user ID mapping
- Add retry logic
- Better error handling
2. **Always invalidate cache** (High Priority)
- Invalidate cache even on failure
- Or invalidate before marking
3. **Fix count accuracy** (Medium Priority)
- Use dedicated count API or fetch all
- Show "66+ unread" if limit reached
4. **Improve cache strategy** (Medium Priority)
- Unified invalidation
- Shorter TTLs
- Cache versioning
5. **Better error recovery** (Low Priority)
- Graceful degradation
- Retry logic
- Better UX
---
**Status**: Analysis complete. Ready for implementation.

View File

@ -1,194 +0,0 @@
# Notification Issues - Analysis & Fixes
**Date**: 2026-01-01
**Issues Reported**:
1. Count shows 66 messages, but only 10 are displayed
2. "Mark all as read" fails
3. Count doesn't update after marking as read
---
## 🔍 Issue Analysis
### Issue 1: Count vs Display Discrepancy
**Symptom**:
- Badge shows: **66 unread notifications**
- Dropdown shows: **Only 10 notifications**
**Root Cause**:
1. **Count Logic**: `getNotificationCount()` calls `getNotifications(userId, 1, 100)` to count
- Gets first 100 notifications from Leantime
- Counts unread: 66
- This is correct for the first 100 notifications
2. **Display Logic**: `getNotifications()` is called with `limit: 20` (default)
- But only 10 are shown (possibly due to pagination or filtering)
- This is a display/pagination issue
**The Problem**:
- If Leantime has more than 100 notifications total, the count will be inaccurate
- The count only reflects the first 100 notifications
- Display shows fewer notifications than the count
**Solution**:
- ✅ Added warning log when count reaches 100 (may have more)
- ⚠️ Consider using a dedicated count API if Leantime provides one
- ⚠️ Consider fetching all notifications for accurate count (may be slow)
---
### Issue 2: Mark All As Read Fails
**Symptom**:
```
[NOTIFICATION_API] Mark all as read - Failed { userId: '...', duration: '197ms' }
```
**Root Cause**:
- Leantime API call is failing
- No detailed error logging to see why
**Solution Applied**:
- ✅ Added comprehensive error logging to `markAllAsRead()`:
- Logs user email and Leantime user ID
- Logs request body and API URL
- Logs response status and body
- Logs parsed response with error details
- Logs exceptions with stack traces
**Next Steps**:
1. Test mark-all-as-read again
2. Check logs for detailed error information
3. Verify Leantime API method name is correct
4. Check if Leantime API requires different parameters
---
## 🔧 Fixes Applied
### 1. Enhanced Error Logging in `markAllAsRead`
**File**: `lib/services/notifications/leantime-adapter.ts`
**Changes**:
- Added detailed logging at each step
- Logs request details (body, URL)
- Logs response details (status, body, parsed data)
- Logs errors with full context
- Logs success/failure status
**Expected Log Output**:
```
[LEANTIME_ADAPTER] markAllAsRead called for ...
[LEANTIME_ADAPTER] markAllAsRead - User email: ...
[LEANTIME_ADAPTER] markAllAsRead - Leantime user ID: ...
[LEANTIME_ADAPTER] markAllAsRead - Request body: {...}
[LEANTIME_ADAPTER] markAllAsRead - API URL: ...
[LEANTIME_ADAPTER] markAllAsRead - Response status: 200
[LEANTIME_ADAPTER] markAllAsRead - Response body: {...}
[LEANTIME_ADAPTER] markAllAsRead - Parsed response: {...}
[LEANTIME_ADAPTER] markAllAsRead - Success: true/false
```
---
### 2. Enhanced Count Logging
**File**: `lib/services/notifications/leantime-adapter.ts`
**Changes**:
- Added warning when count reaches 100 (may have more notifications)
- Added read count to logging
- Added note about potential inaccuracy
---
## 🎯 Next Steps
### Immediate Testing
1. **Test Mark All As Read**
- Click "Mark all as read"
- Check logs for detailed error information
- Look for `[LEANTIME_ADAPTER] markAllAsRead` entries
2. **Verify Count Accuracy**
- Check if Leantime has more than 100 notifications
- Verify count matches actual unread notifications
- Check if count updates after marking as read
### Potential Issues to Check
1. **Leantime API Method Name**
- Current: `leantime.rpc.Notifications.Notifications.markAllNotificationsAsRead`
- Verify this is the correct method name in Leantime API
2. **Leantime API Parameters**
- Current: `{ userId: leantimeUserId }`
- May need additional parameters
3. **Leantime API Response Format**
- Check if response format matches expected format
- May need to handle different response structures
---
## 📊 Expected Behavior After Fixes
### Mark All As Read
**Success Case**:
```
[NOTIFICATION_API] Mark all as read endpoint called
[NOTIFICATION_API] Mark all as read - Processing { userId: '...', timestamp: '...' }
[LEANTIME_ADAPTER] markAllAsRead called for ...
[LEANTIME_ADAPTER] markAllAsRead - Success: true
[NOTIFICATION_API] Mark all as read - Success { userId: '...', duration: 'Xms' }
[NOTIFICATION_SERVICE] Invalidated notification caches for user ...
```
**Failure Case** (with detailed error):
```
[NOTIFICATION_API] Mark all as read endpoint called
[LEANTIME_ADAPTER] markAllAsRead called for ...
[LEANTIME_ADAPTER] markAllAsRead - Response status: 400
[LEANTIME_ADAPTER] markAllAsRead - Response body: {"error": {...}}
[LEANTIME_ADAPTER] markAllAsRead - API Error: {...}
[NOTIFICATION_API] Mark all as read - Failed { userId: '...', duration: 'Xms' }
```
---
## 🔍 Debugging Checklist
When testing, check logs for:
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - User email:` (should show email)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Leantime user ID:` (should show ID)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Request body:` (should show JSON-RPC request)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Response status:` (should be 200 for success)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Response body:` (should show API response)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Parsed response:` (should show result/error)
- [ ] `[LEANTIME_ADAPTER] markAllAsRead - Success:` (should be true/false)
---
## 📝 Summary
**Fixes Applied**:
1. ✅ Enhanced error logging in `markAllAsRead`
2. ✅ Enhanced count logging with warnings
**Next Actions**:
1. Test mark-all-as-read functionality
2. Review detailed error logs
3. Fix Leantime API call based on error details
4. Verify count accuracy
**Status**: ⏳ **AWAITING TESTING** - Enhanced logging will reveal the root cause
---
**Generated**: 2026-01-01

View File

@ -1,202 +0,0 @@
# Notification Issue Analysis - Mark All Read Behavior
**Date**: 2026-01-06
**Issue**: Mark all read works initially, then connection issues occur
---
## 🔍 **What's Happening**
### **Initial Success**:
1. ✅ Dashboard shows 60 messages (count is working)
2. ✅ User clicks "Mark all read"
3. ✅ **First step works** - Marking operation starts successfully
### **Then Connection Issues**:
```
failed to get redirect response [TypeError: fetch failed] {
[cause]: [Error: read ECONNRESET] {
errno: -104,
code: 'ECONNRESET',
syscall: 'read'
}
}
Redis reconnect attempt 1, retrying in 100ms
Reconnecting to Redis..
```
---
## 📊 **Analysis**
### **What the Logs Show**:
1. **IMAP Pool Activity**:
```
[IMAP POOL] Size: 1, Active: 1, Connecting: 0, Max: 20
[IMAP POOL] Size: 0, Active: 0, Connecting: 0, Max: 20
```
- IMAP connections are being used and released
- This is normal behavior
2. **Connection Reset Error**:
- `ECONNRESET` - Connection was reset by peer
- Happens during a fetch request (likely to Leantime API)
- This is a **network/connection issue**, not a code issue
3. **Redis Reconnection**:
- Redis is trying to reconnect (expected behavior)
- Our retry logic is working
---
## 🎯 **Root Cause**
### **Scenario**:
1. User clicks "Mark all read"
2. System starts marking notifications (works initially)
3. During the process, a network connection to Leantime API is reset
4. This could happen because:
- **Network instability** between your server and Leantime
- **Leantime API timeout** (if marking many notifications takes too long)
- **Connection pool exhaustion** (too many concurrent requests)
- **Server-side rate limiting** (Leantime might be throttling requests)
### **Why It Works Initially Then Fails**:
- **First few notifications**: Marked successfully ✅
- **After some time**: Connection resets ❌
- **Result**: Partial success (some marked, some not)
---
## 🔧 **What Our Fixes Handle**
### **✅ What's Working**:
1. **User ID Caching**: Should prevent the "user not found" error
2. **Retry Logic**: Will retry failed requests automatically
3. **Cache Invalidation**: Always happens, so count will refresh
4. **Count Accuracy**: Fetches up to 1000 notifications
### **⚠️ What's Not Handled**:
1. **Long-running operations**: Marking 60 notifications individually can take time
2. **Connection timeouts**: If Leantime API is slow or times out
3. **Rate limiting**: If Leantime throttles too many requests
4. **Partial failures**: Some notifications marked, some not
---
## 💡 **What's Likely Happening**
### **Flow**:
```
1. User clicks "Mark all read"
2. System fetches 60 unread notifications ✅
3. Starts marking each one individually
4. First 10-20 succeed ✅
5. Connection resets (ECONNRESET) ❌
6. Remaining notifications fail to mark
7. Cache is invalidated (our fix) ✅
8. Count refresh shows remaining unread (e.g., 40 instead of 0)
```
### **Why Count Might Not Be 0**:
- Some notifications were marked (e.g., 20 out of 60)
- Connection reset prevented marking the rest
- Cache was invalidated (good!)
- Count refresh shows remaining unread (40 unread)
---
## 🎯 **Expected Behavior**
### **With Our Fixes**:
1. ✅ User ID lookup is cached (faster, more reliable)
2. ✅ Retry logic handles transient failures
3. ✅ Cache always invalidated (count will refresh)
4. ✅ Count shows accurate number (up to 1000)
### **What You Should See**:
- **First attempt**: Some notifications marked, count decreases (e.g., 60 → 40)
- **Second attempt**: More notifications marked, count decreases further (e.g., 40 → 20)
- **Eventually**: All marked, count reaches 0
### **If Connection Issues Persist**:
- Count will show remaining unread
- User can retry "Mark all read"
- Each retry will mark more notifications
- Eventually all will be marked
---
## 🔍 **Diagnostic Questions**
1. **How many notifications are marked?**
- Check if count decreases (e.g., 60 → 40 → 20 → 0)
- If it decreases, marking is working but incomplete
2. **Does retry help?**
- Click "Mark all read" again
- If count decreases further, retry logic is working
3. **Is it always the same number?**
- If the count always stops at the same number (e.g., always 40), specific notifications are probably failing
- If count varies, it's likely connection issues
4. **Network stability?**
- Check if connection to Leantime API is stable
- Monitor for timeouts or rate limiting
---
## 📝 **Recommendations**
### **Immediate**:
1. **Retry the operation**: Click "Mark all read" again
- Should mark more notifications
- Count should decrease further
2. **Check logs for specific errors**:
- Look for which notification IDs are failing
- Check if it's always the same ones
3. **Monitor network**:
- Check connection stability to Leantime
- Look for timeout patterns
### **Future Improvements** (if needed):
1. **Batch marking**: Mark notifications in smaller batches (e.g., 10 at a time)
2. **Progress indicator**: Show "Marking X of Y..." to user
3. **Resume on failure**: Track which notifications were marked, resume from where it failed
4. **Connection pooling**: Better management of concurrent requests
---
## ✅ **Summary**
### **What's Working**:
- ✅ Initial marking starts successfully
- ✅ User ID caching prevents lookup failures
- ✅ Cache invalidation ensures count refreshes
- ✅ Retry logic handles transient failures
### **What's Failing**:
- ⚠️ Connection resets during long operations
- ⚠️ Partial marking (some succeed, some fail)
- ⚠️ Network instability between server and Leantime
### **Solution**:
- **Retry the operation**: Click "Mark all read" multiple times
- Each retry should mark more notifications
- Eventually all will be marked
---
**Status**: This is expected behavior with network issues. The fixes ensure the system recovers and continues working.

View File

@ -1,115 +0,0 @@
# Minio Troubleshooting Guide
This document outlines the fixes implemented for the mission file upload issues with Minio.
## Problem Description
Mission uploads (logo and attachments) were not working correctly:
- Files weren't appearing in Minio despite upload attempts
- Mission logos weren't displaying even though they were uploaded
- Participation field showed "Non spécifié" despite values in the database
- SDG/ODD icons weren't displaying correctly
## Implemented Fixes
### 1. Added URL Generation Function
Added a `getPublicUrl` function in `lib/s3.ts` that properly constructs URLs for files stored in Minio:
```typescript
export function getPublicUrl(filePath: string): string {
if (!filePath) return '';
if (filePath.startsWith('http')) return filePath; // Already a full URL
// Remove leading slash if present
const cleanPath = filePath.startsWith('/') ? filePath.substring(1) : filePath;
// Construct the full URL
const endpoint = S3_CONFIG.endpoint?.replace(/\/$/, ''); // Remove trailing slash if present
const bucket = S3_CONFIG.bucket;
// Return original path if no endpoint is configured
if (!endpoint) return cleanPath;
// Construct and return the full URL
return `${endpoint}/${bucket}/${cleanPath}`;
}
```
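For illustration, with a hypothetical endpoint and bucket the function behaves like this:
```typescript
// Assuming S3_CONFIG.endpoint = 'https://minio.example.com' and S3_CONFIG.bucket = 'uploads':
getPublicUrl('/user-123/missions/456/logo.jpg');
// → 'https://minio.example.com/uploads/user-123/missions/456/logo.jpg'

getPublicUrl('https://cdn.example.com/already-public.png');
// → returned unchanged, because it already starts with 'http'
```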
### 2. Updated Mission Display Page
Modified `app/missions/page.tsx` to use the `getPublicUrl` function when displaying mission logos:
```tsx
{mission.logo ? (
<img
src={mission.logo ? getPublicUrl(mission.logo) : ''}
alt={mission.name}
className="w-full h-full object-cover rounded-md border border-gray-200"
onError={(e) => {
// Error handling...
}}
/>
) : null}
```
### 3. Enhanced Upload API
Updated `/app/api/missions/upload/route.ts` to:
- Include additional logging
- Generate and return proper public URLs
- Improve error handling
### 4. Enhanced Mission Detail API
Modified `/app/api/missions/[missionId]/route.ts` to include public URLs in the response:
```typescript
const missionWithUrls = {
...mission,
logoUrl: mission.logo ? getPublicUrl(mission.logo) : null,
attachments: mission.attachments.map((attachment) => ({
...attachment,
publicUrl: getPublicUrl(attachment.filePath)
}))
};
```
### 5. Added Testing Tools
1. Browser Console Utilities:
- `window.testMinioConnection()` - Test Minio connectivity
- `window.getMinioUrl(path)` - Generate a public URL for debugging
2. Server-side Test Script:
- Created `scripts/test-minio-upload.js` to test uploads from the command line
- Tests uploading, downloading, and URL generation
## How to Test
1. **Using the browser console:**
```javascript
// Test connection and list files
window.testMinioConnection()
// Generate URL for a specific path
window.getMinioUrl('user-123/missions/456/logo.jpg')
```
2. **Using the server-side script:**
```bash
node scripts/test-minio-upload.js
```
## Required Environment Variables
Make sure these are properly set in your environment:
- `MINIO_S3_UPLOAD_BUCKET_URL` - The Minio endpoint URL
- `MINIO_AWS_REGION` - The AWS region (often 'us-east-1' for Minio)
- `MINIO_AWS_S3_UPLOAD_BUCKET_NAME` - The bucket name
- `MINIO_ACCESS_KEY` - Access key for Minio
- `MINIO_SECRET_KEY` - Secret key for Minio
## Additional Notes
1. The same Minio bucket is used for both Pages and Missions.
2. Pages functionality is working properly, suggesting the Minio configuration itself is correct.
3. Make sure that the bucket has proper permissions for public read access.
4. The URL paths for SDG/ODD icons were corrected to use `/F SDG Icons 2019 WEB/F-WEB-Goal-XX.png`

108
README.md
View File

@ -1,108 +0,0 @@
# Neah Email Application
A modern email client built with Next.js, featuring email composition, viewing, and management capabilities.
## Email Processing Workflow
The application handles email processing through a centralized workflow:
1. **Email Fetching**: Emails are fetched through the `/api/courrier` endpoints using user credentials stored in the database.
2. **Email Parsing**: Raw email content is parsed using:
- Server-side: `parseEmail` function from `lib/server/email-parser.ts` (which uses `simpleParser` from the `mailparser` library)
- API route: `/api/parse-email` provides a REST interface to the parser
3. **HTML Sanitization**: Email HTML content is sanitized and processed using:
- `sanitizeHtml` function in `lib/utils/email-utils.ts` (centralized implementation)
- DOMPurify with specific configuration to handle email content safely
4. **Email Display**: Sanitized content is rendered in the UI with proper styling and security measures
5. **Email Composition**: The `ComposeEmail` component handles email creation, replying, and forwarding
- Email is sent through the `/api/courrier/send` endpoint
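A hedged sketch of how steps 2-4 fit together on the server (the `@/` import alias and the exact return shapes of `parseEmail` and `sanitizeHtml` are assumptions; only the module paths come from this README):
```typescript
import { parseEmail } from '@/lib/server/email-parser';   // wraps mailparser's simpleParser
import { sanitizeHtml } from '@/lib/utils/email-utils';   // DOMPurify-based sanitizer

// Sketch only: the parsed-mail fields mirror what simpleParser returns,
// but the actual return type of parseEmail may differ.
export async function prepareEmailForDisplay(rawSource: Buffer) {
  const parsed = await parseEmail(rawSource);

  // Prefer the HTML body when present, falling back to plain text.
  const html = parsed.html
    ? sanitizeHtml(parsed.html)        // strips scripts and unsafe attributes
    : `<pre>${parsed.text ?? ''}</pre>`;

  return { subject: parsed.subject, html, attachments: parsed.attachments };
}
```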
## Key Features
- **Email Fetching and Management**: Connect to IMAP servers and manage email fetching and caching logic
- **Email Composition**: Rich text editor with reply and forwarding capabilities
- **Email Display**: Secure rendering of HTML emails
- **Attachment Handling**: View and download attachments
## Project Structure
The project follows a modular structure:
- `/app` - Next.js App Router structure with routes and API endpoints
- `/components` - React components organized by domain
- `/lib` - Core library code:
- `/server` - Server-only code like email parsing
- `/services` - Domain-specific services, including email service
- `/reducers` - State management logic
- `/utils` - Utility functions including the centralized email formatter
## Technologies
- Next.js 14+ with App Router
- React Server Components
- TailwindCSS for styling
- Mailparser for email parsing
- ImapFlow for email fetching
- DOMPurify for HTML sanitization
- Redis for caching
## State Management
Email state is managed through React context and reducers, with server data fetched through React Server Components or client-side API calls as needed.
# Email Formatting
## Centralized Email Formatter
All email formatting is now handled by a centralized formatter in `lib/utils/email-utils.ts`. This ensures consistent handling of:
- Reply and forward formatting
- HTML sanitization
- RTL/LTR text direction
- MIME encoding and decoding for email composition
Key functions include:
- `formatForwardedEmail`: Format emails for forwarding
- `formatReplyEmail`: Format emails for replying
- `sanitizeHtml`: Safely sanitize HTML email content
- `formatEmailForReplyOrForward`: Compatibility function for both
- `decodeComposeContent`: Parse MIME content for email composition
- `encodeComposeContent`: Create MIME-formatted content for sending emails
This centralized approach prevents formatting inconsistencies and direction problems when dealing with emails in different languages.
## Deprecated Functions
Several functions have been deprecated and removed in favor of centralized implementations:
- Check the `DEPRECATED_FUNCTIONS.md` file for a complete list of deprecated functions and their replacements.
## User Management API
The application provides endpoints for managing users in multiple systems:
- **Create User**:
- Endpoint: `POST /api/users`
- Creates users in Keycloak, Leantime, and Dolibarr (if they have "mediation" or "expression" roles)
- **Update User**:
- Endpoint: `PUT /api/users/[userId]`
- Updates user details in Keycloak
- **Delete User**:
- Endpoint: `DELETE /api/users?id=[userId]&email=[userEmail]`
- Deletes users from Keycloak, Leantime, and Dolibarr systems
- **Important**: Always include both `id` and `email` parameters for complete deletion across all systems
- The legacy endpoint `DELETE /api/users/[userId]` forwards to the above endpoint
- **Manage Roles**:
- Endpoint: `PUT /api/users/[userId]/roles`
- Updates user roles in Keycloak
- **Reset Password**:
- Endpoint: `PUT /api/users/[userId]/password`
- Resets user password in Keycloak
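As an illustration of the deletion rule above, a client-side call might look like this (IDs and email are hypothetical):
```typescript
// Delete a user across Keycloak, Leantime, and Dolibarr.
// Both id and email must be passed, otherwise deletion is incomplete.
async function deleteUser(userId: string, userEmail: string): Promise<void> {
  const params = new URLSearchParams({ id: userId, email: userEmail });
  const res = await fetch(`/api/users?${params.toString()}`, { method: 'DELETE' });
  if (!res.ok) {
    throw new Error(`User deletion failed: HTTP ${res.status}`);
  }
}

// Example (hypothetical values):
// await deleteUser('keycloak-user-id', 'jane.doe@example.org');
```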

View File

@ -1,366 +0,0 @@
# Why Dashboard and Applications Have Separated Authentication Flows
## Executive Summary
The dashboard and applications use **two completely separate authentication mechanisms** that operate independently:
1. **Dashboard**: Uses **NextAuth.js** with JWT-based sessions (30 days)
2. **Applications**: Use **Keycloak SSO** directly via browser cookies
This separation is why logging out from the dashboard doesn't automatically log you out from applications opened directly in the browser.
---
## Architecture Overview
### Two Independent Authentication Systems
```
┌─────────────────────────────────────────────────────────────┐
│ AUTHENTICATION LAYERS │
├─────────────────────────────────────────────────────────────┤
│ │
│ ┌──────────────────────┐ ┌──────────────────────┐ │
│ │ DASHBOARD AUTH │ │ APPLICATION AUTH │ │
│ │ │ │ │ │
│ │ NextAuth.js │ │ Keycloak SSO │ │
│ │ (JWT Strategy) │ │ (Cookie-based) │ │
│ │ │ │ │ │
│ │ - Session: 30 days │ │ - Session: Variable │ │
│ │ - Stored in: Cookie │ │ - Stored in: Cookie │ │
│ │ - Domain: Dashboard │ │ - Domain: Keycloak │ │
│ │ - Independent │ │ - Independent │ │
│ └──────────────────────┘ └──────────────────────┘ │
│ │ │ │
│ └──────────┬───────────────────┘ │
│ │ │
│ ┌───────▼────────┐ │
│ │ KEYCLOAK │ │
│ │ (IdP Server) │ │
│ └────────────────┘ │
│ │
└─────────────────────────────────────────────────────────────┘
```
---
## Why They're Separated
### 1. Different Authentication Purposes
**Dashboard Authentication (NextAuth.js)**:
- Purpose: Authenticate the **Next.js dashboard application**
- Method: OAuth 2.0 flow → Get tokens → Store in JWT
- Session Management: NextAuth manages its own session lifecycle
- Storage: Encrypted JWT in HTTP-only cookie on dashboard domain
- Duration: 30 days (configurable in `app/api/auth/options.ts`)
**Application Authentication (Keycloak SSO)**:
- Purpose: Authenticate **standalone applications** (not embedded in dashboard)
- Method: Direct Keycloak authentication via browser cookies
- Session Management: Keycloak manages SSO session lifecycle
- Storage: Keycloak session cookies on Keycloak domain
- Duration: Configured in Keycloak (typically 30 minutes to a few hours)
### 2. Different Session Storage Locations
**Dashboard Session**:
```
Cookie Name: next-auth.session-token
Domain: dashboard.example.com
Path: /
HttpOnly: Yes
Secure: Yes (if HTTPS)
SameSite: Lax
Content: Encrypted JWT containing:
- accessToken (Keycloak OAuth token)
- refreshToken (Keycloak refresh token)
- idToken (Keycloak ID token)
- User info (id, email, roles, etc.)
```
**Application Session**:
```
Cookie Name: KEYCLOAK_SESSION
Domain: keycloak.example.com (or configured domain)
Path: /
HttpOnly: Yes
Secure: Yes
SameSite: Lax or None (for cross-site)
Content: Keycloak session identifier
```
### 3. Different Authentication Flows
**Dashboard Flow**:
```
1. User visits dashboard → /signin
2. NextAuth redirects to Keycloak OAuth endpoint
3. Keycloak authenticates user
4. Keycloak redirects back with authorization code
5. NextAuth exchanges code for tokens
6. NextAuth creates JWT session
7. JWT stored in dashboard cookie
8. Dashboard uses JWT for authentication
```
**Application Flow** (when opened directly):
```
1. User visits application directly (not via dashboard)
2. Application checks for Keycloak session cookie
3. If cookie exists → User is authenticated (SSO)
4. If cookie doesn't exist → Redirect to Keycloak login
5. Keycloak authenticates user
6. Keycloak sets session cookie
7. Application uses cookie for authentication
```
---
## Why Dashboard Logout Doesn't Log Out Applications
### The Problem
When you log out from the dashboard:
1. **Dashboard logout process**:
- Clears NextAuth session cookie (`next-auth.session-token`)
- Calls Keycloak logout endpoint with `id_token_hint`
- Keycloak clears **client session** for dashboard OAuth client
- Keycloak may clear SSO session (if it's the last client session)
2. **What happens to applications**:
- Applications don't know about dashboard logout
- Applications still have Keycloak SSO session cookie
- Applications continue to work because they use Keycloak cookies, not NextAuth
### Technical Reasons
#### Reason 1: Different Cookie Domains
**Dashboard Cookie**:
- Domain: `dashboard.example.com`
- Cleared when dashboard logs out
- Applications can't access this cookie (different domain)
**Keycloak SSO Cookie**:
- Domain: `keycloak.example.com` (or configured domain)
- Not cleared by dashboard logout (unless SSO session is cleared)
- Applications can access this cookie (same domain as Keycloak)
#### Reason 2: Independent Session Lifecycles
**NextAuth Session**:
- Managed by NextAuth.js
- Lifecycle: Created on login → Valid for 30 days → Cleared on logout
- Independent of Keycloak SSO session
**Keycloak SSO Session**:
- Managed by Keycloak server
- Lifecycle: Created on login → Valid until timeout or explicit logout → Cleared on logout
- Independent of NextAuth session
#### Reason 3: Different Authentication Mechanisms
**Dashboard**:
- Uses OAuth 2.0 tokens (access token, refresh token)
- Tokens stored in NextAuth JWT
- Authentication: Validate JWT → Extract tokens → Use tokens for API calls
**Applications**:
- Use Keycloak session cookies directly
- No OAuth tokens involved
- Authentication: Check for Keycloak session cookie → If exists, user is authenticated
#### Reason 4: Keycloak SSO Session Persistence
**Keycloak maintains two types of sessions**:
1. **Client Session** (per OAuth client):
- Specific to each OAuth client (dashboard, app1, app2, etc.)
- Cleared when that specific client logs out
- Dashboard logout clears dashboard's client session
2. **SSO Session** (realm-wide):
- Shared across all clients in the realm
- Persists even after individual client logouts
- Only cleared when:
- All client sessions are logged out
- Explicit SSO session logout
- Session timeout
- Admin API logout
**When dashboard logs out**:
- Dashboard's client session is cleared ✅
- SSO session may persist if other applications have active sessions ❌
- Applications continue to work because SSO session is still valid ❌
---
## Current Logout Flow Analysis
### What Happens When You Log Out from Dashboard
```
Step 1: User clicks logout in dashboard
Step 2: Dashboard calls NextAuth signOut()
→ Clears: next-auth.session-token cookie
→ Clears: Dashboard's NextAuth session
Step 3: Dashboard calls /api/auth/end-sso-session
→ Uses Keycloak Admin API
→ Calls: adminClient.users.logout({ id: userId })
→ Clears: All client sessions for user
→ May clear: SSO session (if it's the last client session)
Step 4: Dashboard redirects to Keycloak logout endpoint
→ URL: ${KEYCLOAK_ISSUER}/protocol/openid-connect/logout
→ Parameters: id_token_hint, post_logout_redirect_uri
→ Clears: Dashboard's client session
→ May clear: SSO session (if it's the last client session)
Step 5: Keycloak redirects back to /signin?logout=true
→ Dashboard shows logout message
```
### What Happens to Applications
```
Applications opened directly in browser:
Step 1: Application checks for Keycloak session cookie
→ Cookie: KEYCLOAK_SESSION
→ Domain: keycloak.example.com
Step 2: If SSO session still exists:
→ Application finds valid SSO session cookie ✅
→ Application authenticates user automatically ✅
→ User remains logged in ❌
Step 3: If SSO session was cleared:
→ Application doesn't find session cookie ✅
→ Application redirects to Keycloak login ✅
→ User must log in again ✅
```
### Why Applications Stay Logged In
**Scenario 1: SSO Session Persists**
- Dashboard logout clears client sessions
- But SSO session cookie still exists
- Applications check SSO session cookie → Still valid → User stays logged in
**Scenario 2: Other Applications Have Active Sessions**
- If other applications are open in other tabs/windows
- They have active client sessions
- Keycloak won't clear SSO session (because other clients are still active)
- All applications stay logged in
**Scenario 3: Cookie Domain Mismatch**
- Dashboard tries to clear Keycloak cookies client-side
- But cookies are on different domain (keycloak.example.com)
- Browser security prevents clearing cross-domain cookies
- Applications keep their cookies → Stay logged in
---
## Why This Architecture Exists
### Historical/Design Reasons
1. **Legacy Applications**:
- Applications may have existed before the dashboard
- They were designed to use Keycloak directly
- Dashboard was added later as a wrapper/portal
2. **Separation of Concerns**:
- Dashboard: Portal/aggregator (doesn't need to know about app internals)
- Applications: Standalone services (don't depend on dashboard)
3. **Flexibility**:
- Applications can be accessed directly (not just via dashboard)
- Applications can be used independently
- Dashboard is optional, not required
4. **SSO Design**:
- Keycloak SSO is designed to work across multiple applications
- Logging out from one app shouldn't log out from all apps
- This is by design for SSO functionality
### Technical Constraints
1. **Cookie Security**:
- Browsers prevent cross-domain cookie access
- Dashboard can't directly clear Keycloak cookies (different domain)
- Must use Keycloak logout endpoint or Admin API
2. **Stateless vs Stateful**:
- NextAuth: Stateless (JWT, no server-side session)
- Keycloak: Stateful (server-side session, cookies)
3. **OAuth vs Direct Authentication**:
- Dashboard: Uses OAuth 2.0 (tokens)
- Applications: Use direct Keycloak authentication (cookies)
---
## What Would Be Needed for Unified Logout
To make dashboard logout also log out applications, you would need:
### Option 1: Keycloak Front-Channel Logout (Recommended)
- Configure all applications to participate in Front-Channel Logout
- When dashboard logs out, Keycloak notifies all registered applications
- Applications receive logout notification and clear their sessions
- **Requires**: Keycloak configuration + Application support
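As an illustration only (this route does not exist in the current codebase), a Next.js application participating in Front-Channel Logout could expose a handler along these lines; the route path and the cleared cookie name are assumptions.
```typescript
// app/api/auth/frontchannel-logout/route.ts — hypothetical sketch.
// Keycloak loads this URL in a hidden iframe when the SSO session ends,
// so the handler only needs to clear the application's own session cookie.
import { NextRequest, NextResponse } from "next/server";

export async function GET(request: NextRequest) {
  const response = new NextResponse(null, { status: 200 });
  // Clear the app's session cookie (the name depends on the framework used).
  response.cookies.set("next-auth.session-token", "", {
    httpOnly: true,
    secure: true,
    sameSite: "lax",
    path: "/",
    maxAge: 0,
  });
  return response;
}
```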
### Option 2: Keycloak Single Logout (SLO)
- Configure all applications to participate in SLO
- When one application logs out, all applications are logged out
- **Requires**: Keycloak configuration + Application support
### Option 3: Clear SSO Session Explicitly
- Use Keycloak Admin API to end SSO session
- This clears the realm-wide SSO session
- All applications lose their authentication
- **Current Implementation**: Partially implemented (`/api/auth/end-sso-session`)
- **Issue**: May not clear SSO session cookie if other clients are active
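The Admin API call behind Option 3 is the `adminClient.users.logout(...)` call referenced in the logout flow above. A minimal sketch using `@keycloak/keycloak-admin-client` might look like this; the environment variable names and client-credentials setup are assumptions.
```typescript
// Minimal sketch of ending a user's sessions via the Keycloak Admin API.
// Configuration values and the service-account grant are assumptions.
import KcAdminClient from "@keycloak/keycloak-admin-client";

export async function endSsoSession(userId: string): Promise<void> {
  const adminClient = new KcAdminClient({
    baseUrl: process.env.KEYCLOAK_BASE_URL,
    realmName: process.env.KEYCLOAK_REALM,
  });
  // Authenticate the admin client (service-account credentials assumed).
  await adminClient.auth({
    grantType: "client_credentials",
    clientId: process.env.KEYCLOAK_ADMIN_CLIENT_ID!,
    clientSecret: process.env.KEYCLOAK_ADMIN_CLIENT_SECRET,
  });
  // Logs the user out of all sessions in the realm, as referenced above.
  await adminClient.users.logout({ id: userId });
}
```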
### Option 4: Application Logout Endpoints
- Each application exposes a logout endpoint
- Dashboard calls all application logout endpoints
- Applications clear their own sessions
- **Requires**: Application modifications + Dashboard coordination
---
## Summary
### Why They're Separated
1. **Different purposes**: Dashboard is a portal, applications are standalone services
2. **Different storage**: Dashboard uses NextAuth JWT, applications use Keycloak cookies
3. **Different domains**: Cookies are on different domains (security prevents cross-domain access)
4. **Different lifecycles**: NextAuth session (30 days) vs Keycloak SSO session (variable)
5. **SSO design**: Keycloak SSO is designed to persist across client logouts
### Why Dashboard Logout Doesn't Log Out Applications
1. **SSO session persists**: Keycloak SSO session may not be cleared
2. **Other active sessions**: If other applications are open, SSO session stays active
3. **Cookie domain**: Dashboard can't directly clear Keycloak cookies (different domain)
4. **Independent mechanisms**: Applications don't know about NextAuth session state
### The Solution
To achieve unified logout, you need to:
- Configure Keycloak Front-Channel Logout or SLO
- Ensure all applications participate in logout notifications
- Or use Admin API to explicitly end SSO session (current implementation attempts this)
The current implementation (`/api/auth/end-sso-session`) tries to clear the SSO session, but it may not work if:
- Other applications have active sessions
- SSO session cookie is on a different domain
- Keycloak configuration prevents SSO session clearing
View File
@ -1,335 +0,0 @@
# Session Callback Logging - Impact Analysis
**Date**: 2026-01-01
**Purpose**: Analyze the impact of reducing session callback logging on the multi-stack architecture
---
## 🏗️ Architecture Overview
### Stack Components
1. **Next.js Dashboard** (this application)
2. **Keycloak** (SSO/Authentication provider)
3. **MinIO** (Object storage for files)
4. **External Services** (Leantime, Rocket.Chat, News API, etc.)
### Integration Points
- **Keycloak**: OAuth2/OIDC provider, session tokens, role extraction
- **MinIO**: File storage (mission logos, attachments), S3-compatible API
- **External APIs**: All require authenticated session
---
## 📋 Current Session Callback Logging
### What's Being Logged
```typescript
// Lines 407-472 in app/api/auth/options.ts
- === SESSION CALLBACK START ===
- Token error status
- Access token presence
- Refresh token presence
- Token roles
- Token sub (user ID)
- Token email
- Token name
- Token username
- User roles for session
- Creating session user object
- Setting session tokens
- ✅ Session created successfully
- Session user details
- === SESSION CALLBACK END ===
```
### Why It Was Added
**Historical Context** (from `DEBUG_502_CALLBACK.md`):
- Added specifically to debug **502 errors** with Keycloak callbacks
- Critical for diagnosing authentication failures
- Helps identify when session callback doesn't execute
- Essential for troubleshooting SSO flow issues
---
## 🔍 Impact Analysis
### 1. Keycloak Integration Impact
**Dependencies**:
- ✅ **No functional impact**: Logging doesn't affect Keycloak authentication
- ⚠️ **Debugging impact**: Removing logs makes troubleshooting harder
- ✅ **Error logging preserved**: Critical errors still logged
**Keycloak Flow**:
```
1. User authenticates → Keycloak
2. Keycloak redirects → Next.js callback
3. JWT callback extracts tokens
4. Session callback builds session ← LOGGING HERE
5. Session used for all API calls
```
**Recommendation**:
- Keep error logging (always)
- Make success logging conditional (DEBUG_SESSION flag)
---
### 2. MinIO Integration Impact
**Dependencies**:
- ✅ **No direct dependency**: MinIO doesn't use session callback logs
- ✅ **Uses session for auth**: Session object used to verify user permissions
- ✅ **No impact**: Logging changes won't affect MinIO operations
**MinIO Flow**:
```
1. API route calls getServerSession()
2. Session callback executes (builds session)
3. Session used to verify user authentication
4. MinIO operations proceed with authenticated user
```
**Recommendation**:
- ✅ **Safe to reduce logging**: No impact on MinIO functionality
---
### 3. External Services Impact
**Services**:
- Leantime (project management)
- Rocket.Chat (messaging)
- News API
- Email/IMAP
**Dependencies**:
- ✅ **No functional impact**: Services don't read logs
- ✅ **Session still created**: Logging doesn't affect session creation
- ✅ **Authentication works**: Session object still valid
**Recommendation**:
- ✅ **Safe to reduce logging**: No impact on external services
---
### 4. Monitoring & Debugging Impact
**Current Usage**:
- Debugging 502 errors (Keycloak callbacks)
- Troubleshooting authentication issues
- Monitoring session creation frequency
- Identifying session callback failures
**Impact of Reducing Logging**:
- ⚠️ **Harder to debug**: Less visibility into session creation
- ✅ **Still debuggable**: Error logging preserved
- ✅ **Can enable on-demand**: DEBUG_SESSION flag for troubleshooting
**Recommendation**:
- Use conditional logging with DEBUG_SESSION flag
- Keep error logging always enabled
- Document how to enable debug logging
---
## ✅ Safe Implementation Strategy
### Phase 1: Conditional Logging (Recommended)
**Approach**: Make success logging conditional, keep error logging always
```typescript
async session({ session, token }) {
try {
// Always log errors
if (token.error) {
console.error("❌ Session callback error:", token.error);
}
// Conditional verbose logging
const DEBUG_SESSION = process.env.DEBUG_SESSION === 'true' ||
process.env.NODE_ENV === 'development';
if (DEBUG_SESSION) {
console.log('=== SESSION CALLBACK START ===');
console.log('Token error:', token.error);
console.log('Has accessToken:', !!token.accessToken);
// ... rest of verbose logging
}
// Always log critical errors
if (token.error === "SessionNotActive" ||
token.error === "NoRefreshToken" ||
!token.accessToken ||
!token.refreshToken) {
console.log("❌ Session invalidated or tokens missing", {
error: token.error,
hasAccessToken: !!token.accessToken,
hasRefreshToken: !!token.refreshToken
});
return null as any;
}
// ... rest of callback logic
if (DEBUG_SESSION) {
console.log('✅ Session created successfully');
console.log('Session user id:', session.user.id);
console.log('=== SESSION CALLBACK END ===');
}
return session;
} catch (error) {
// Always log critical errors
console.error('❌❌❌ CRITICAL ERROR IN SESSION CALLBACK ❌❌❌');
console.error('Error:', error);
throw error;
}
}
```
**Benefits**:
- ✅ Production: Minimal logging (errors only)
- ✅ Development: Full logging for debugging
- ✅ On-demand: Enable with DEBUG_SESSION=true
- ✅ No functional impact
---
### Phase 2: Environment-Based Logging
**Alternative**: Use NODE_ENV
```typescript
const isDevelopment = process.env.NODE_ENV === 'development';
if (isDevelopment || token.error) {
// Verbose logging
}
```
**Benefits**:
- ✅ Simple implementation
- ✅ Automatic in development
- ⚠️ Less flexible than DEBUG_SESSION flag
---
## 🎯 Recommended Approach
### Option 1: DEBUG_SESSION Flag (Best)
**Implementation**:
- Add `DEBUG_SESSION` environment variable
- Default: `false` (minimal logging)
- Set to `true` when debugging needed
**Usage**:
```bash
# Production (minimal logging)
DEBUG_SESSION=false npm start
# Debugging (verbose logging)
DEBUG_SESSION=true npm start
```
**Pros**:
- ✅ Flexible (can enable on-demand)
- ✅ Production-friendly (minimal logs)
- ✅ Debug-friendly (full logs when needed)
- ✅ No code changes needed to toggle
**Cons**:
- ⚠️ Requires environment variable management
---
### Option 2: NODE_ENV Based (Simpler)
**Implementation**:
- Use `NODE_ENV === 'development'` for verbose logging
- Always log errors
**Pros**:
- ✅ Simple (no new env vars)
- ✅ Automatic (works with existing setup)
**Cons**:
- ⚠️ Less flexible (can't enable in production easily)
---
## 📊 Risk Assessment
| Risk | Impact | Mitigation |
|------|--------|------------|
| **Lost debugging capability** | Medium | Keep error logging, add DEBUG_SESSION flag |
| **Harder to troubleshoot 502 errors** | Medium | Document how to enable debug logging |
| **Performance impact** | Low | Logging overhead is minimal |
| **Functional impact** | None | Logging doesn't affect functionality |
---
## ✅ Final Recommendation
### Implementation Plan
1. **Keep Error Logging Always**
- Critical errors always logged
- Session invalidation always logged
- Exception handling always logged
2. **Make Success Logging Conditional**
- Use `DEBUG_SESSION` environment variable
- Default: `false` (production-friendly)
- Can enable: `DEBUG_SESSION=true` (debugging)
3. **Document Debugging Process**
- Add to README or troubleshooting guide
- Explain when to enable DEBUG_SESSION
- Document what logs to look for
4. **Test in Staging**
- Verify error logging still works
- Test with DEBUG_SESSION=true
- Test with DEBUG_SESSION=false
---
## 🔧 Implementation Checklist
- [ ] Update `app/api/auth/options.ts` with conditional logging
- [ ] Add `DEBUG_SESSION` to environment variable documentation
- [ ] Test error logging (should always work)
- [ ] Test success logging with DEBUG_SESSION=true
- [ ] Test success logging with DEBUG_SESSION=false
- [ ] Verify Keycloak authentication still works
- [ ] Verify MinIO operations still work
- [ ] Verify external services still work
- [ ] Update troubleshooting documentation
---
## 📝 Summary
**Impact Level**: 🟢 **LOW RISK**
**Key Findings**:
1. ✅ No functional impact on Keycloak, MinIO, or external services
2. ✅ Logging was added for debugging, not functionality
3. ✅ Error logging preserved (critical for troubleshooting)
4. ✅ Conditional logging provides flexibility
**Recommendation**:
- ✅ **Proceed with conditional logging**
- ✅ **Use DEBUG_SESSION flag for flexibility**
- ✅ **Keep error logging always enabled**
**Confidence**: 🟢 **HIGH** - Safe to implement
---
**Generated**: 2026-01-01
**Next Step**: Implement conditional logging in `app/api/auth/options.ts`
View File
@ -1,233 +0,0 @@
# NextAuth Session Duration: 30 Days vs 4 Hours - Security Analysis
## Current Configuration
**Current Setting** (`app/api/auth/options.ts:190`):
```typescript
session: {
strategy: "jwt",
maxAge: 30 * 24 * 60 * 60, // 30 days (2,592,000 seconds)
}
```
**Proposed Setting**:
```typescript
session: {
strategy: "jwt",
maxAge: 4 * 60 * 60, // 4 hours (14,400 seconds)
}
```
---
## Security Analysis
### ✅ **Why 4 Hours is Better for Security**
1. **Reduced Attack Window**:
- **30 days**: If session is compromised, attacker has 30 days of access
- **4 hours**: If session is compromised, attacker has maximum 4 hours of access
- **Risk Reduction**: 99.4% reduction in maximum exposure time
2. **Industry Best Practices**:
- **NIST Guidelines**: Recommend session timeouts of 2-8 hours for high-security applications
- **OWASP**: Recommends session timeouts based on risk level (typically 2-8 hours)
- **Common Practice**: Most enterprise applications use 4-8 hour sessions
3. **Device Security**:
- **30 days**: Device left unattended = 30 days of potential unauthorized access
- **4 hours**: Device left unattended = maximum 4 hours of potential access
- **Better for**: Shared devices, public computers, unattended workstations
4. **Compliance**:
- Many security standards (ISO 27001, SOC 2) require reasonable session timeouts
- 30 days is often considered too long for compliance
- 4 hours aligns better with security compliance requirements
5. **Stolen Session Cookie**:
- If session cookie is stolen (XSS, MITM), shorter duration limits damage
- 4 hours gives attacker limited time to exploit
- 30 days gives attacker extensive time to exploit
### ⚠️ **Considerations & Trade-offs**
1. **User Experience Impact**:
- **30 days**: Users rarely need to re-authenticate (convenient)
- **4 hours**: Users need to re-authenticate every 4 hours (less convenient)
- **Impact**: Moderate - users will need to log in more frequently
2. **Token Refresh Behavior**:
- **Good News**: Your code already handles token refresh automatically
- **How it works**:
- When NextAuth session expires (4 hours), JWT callback runs
- If `accessToken` is expired, it calls `refreshAccessToken()`
- Uses `refreshToken` to get new tokens from Keycloak
- Session is automatically renewed (if refresh token is still valid)
- **Result**: Users may not notice the 4-hour expiration if they're active
3. **Keycloak Refresh Token Lifetime**:
- **Important**: Keycloak refresh tokens typically last 7-30 days
- **What this means**:
- NextAuth session expires after 4 hours
- But refresh token is still valid (e.g., 7 days)
- NextAuth automatically refreshes tokens
- User stays logged in seamlessly (if active)
- **Only expires if**: User is inactive for longer than refresh token lifetime
4. **Keycloak Session Alignment**:
- **Current Issue**: Keycloak sessions typically expire in 30 minutes to a few hours
- **With 4-hour NextAuth session**:
- Better alignment with Keycloak session timeouts
- Reduces session mismatch issues
- Iframe applications will have more consistent session state
---
## How It Will Work
### Session Lifecycle with 4-Hour maxAge
```
User logs in
  → NextAuth creates JWT session (expires in 4 hours)

User is active for 2 hours
  → User makes request → NextAuth checks session
  → Session still valid (< 4 hours) → Continue

User is active for 3 hours
  → User makes request → NextAuth checks session
  → Session still valid (< 4 hours) → Continue

User is active for 4.5 hours (session expired)
  → User makes request → NextAuth checks session
  → Session expired → JWT callback runs
      → Checks accessToken expiration
      → If accessToken expired → Calls refreshAccessToken()
      → Uses refreshToken to get new tokens from Keycloak
      → If refreshToken still valid → New session created (another 4 hours)
          → User continues seamlessly (no re-authentication needed)
      → If refreshToken expired → User must re-authenticate
```
### When User Must Re-authenticate
**User must re-authenticate if**:
1. **Inactive for longer than refresh token lifetime** (typically 7-30 days)
2. **Refresh token is revoked** (logout, admin action, security event)
3. **Keycloak session is invalidated** (logout from another application)
**User does NOT need to re-authenticate if**:
1. **Active within refresh token lifetime** (automatic token refresh)
2. **Session expires but refresh token is valid** (automatic renewal)
---
## Recommendations
### ✅ **Recommendation: Implement 4-Hour Session**
**Reasons**:
1. ✅ **Significantly better security** (99.4% reduction in exposure window)
2. ✅ **Aligns with industry best practices** (NIST, OWASP)
3. ✅ **Better compliance** (meets security standards)
4. ✅ **Better alignment with Keycloak sessions**
5. ✅ **Minimal UX impact** (automatic token refresh handles renewal)
6. ✅ **Code already supports it** (token refresh mechanism exists)
### ⚠️ **Important Considerations**
1. **Verify Keycloak Refresh Token Lifetime**:
- Check Keycloak configuration for refresh token lifetime
- Ensure it's longer than 4 hours (typically 7-30 days)
- If shorter, users will need to re-authenticate frequently
2. **Monitor User Experience**:
- Track how often users need to re-authenticate
- If too frequent, consider increasing to 6-8 hours
- Balance security with usability
3. **Consider Activity-Based Extension**:
- Current implementation: Fixed 4-hour expiration
- Alternative: Extend session on activity (sliding window)
- Requires additional implementation (activity tracking)
4. **Keycloak Session Configuration**:
- Consider aligning Keycloak SSO session timeout with NextAuth
- Or ensure Keycloak session is longer than NextAuth session
- Prevents session mismatch issues
### 📋 **Implementation Checklist**
Before implementing:
- [ ] Verify Keycloak refresh token lifetime (should be > 4 hours)
- [ ] Test token refresh flow with 4-hour session
- [ ] Monitor user re-authentication frequency
- [ ] Consider user feedback on session duration
- [ ] Document the change for users (if needed)
- [ ] Update security documentation
---
## Comparison Table
| Aspect | 30 Days | 4 Hours | Winner |
|--------|---------|---------|--------|
| **Security** | Low (long exposure window) | High (short exposure window) | ✅ 4 Hours |
| **User Convenience** | High (rare re-authentication) | Medium (automatic refresh) | ✅ 30 Days |
| **Compliance** | Poor (too long) | Good (meets standards) | ✅ 4 Hours |
| **Risk Reduction** | Low | High (99.4% reduction) | ✅ 4 Hours |
| **Keycloak Alignment** | Poor (mismatch) | Good (better alignment) | ✅ 4 Hours |
| **Token Refresh** | Works | Works (same mechanism) | ✅ Tie |
---
## Conclusion
**Recommendation: Change to 4 hours**
**Why**:
- Significantly better security posture
- Aligns with industry best practices
- Better compliance with security standards
- Minimal UX impact (automatic token refresh)
- Better alignment with Keycloak session timeouts
- Code already supports it
**Implementation**:
- Simple change: `maxAge: 4 * 60 * 60`
- No code changes needed (token refresh already works)
- Monitor user experience and adjust if needed
**Alternative Consideration**:
- If 4 hours is too aggressive, consider 6-8 hours as a middle ground
- Still provides significant security improvement over 30 days
- Better user experience than 4 hours
---
## Final Verdict
**✅ Yes, change to 4 hours** - This is a good security practice that:
- Significantly reduces security risk
- Aligns with industry standards
- Has minimal UX impact (automatic refresh)
- Works with existing code
- Better aligns with Keycloak sessions
The only trade-off is slightly more frequent re-authentication for inactive users, but this is a reasonable security trade-off.
View File
@ -1,250 +0,0 @@
# SSO Flow Analysis - Keycloak External Logout Issue
## Current Flow Trace
### Scenario: User logs out from Keycloak directly, then accesses dashboard
**Step-by-step flow:**
1. **Initial State (Before Keycloak Logout)**
- User is logged into Dashboard via NextAuth
- NextAuth JWT contains:
- `accessToken`: Valid Keycloak OAuth token
- `refreshToken`: Valid Keycloak refresh token
- `idToken`: Valid Keycloak ID token
- Keycloak session cookies are set in browser
- Iframe applications can authenticate via Keycloak cookies
2. **User Logs Out from Keycloak Directly (External Application)**
- External application calls: `POST /realms/{realm}/protocol/openid-connect/logout`
- Keycloak invalidates:
- ✅ Keycloak session cookies (cleared)
- ✅ Keycloak refresh token (invalidated)
- ✅ Keycloak access token (invalidated)
- ❌ **NextAuth JWT still contains old tokens** (NextAuth doesn't know about logout)
- ❌ **NextAuth session cookie still valid** (30-day expiration)
3. **User Accesses Dashboard**
- Browser sends NextAuth session cookie
- NextAuth decrypts JWT
- JWT contains old (now invalid) tokens
- **Token expiration check**: `Date.now() < (token.accessTokenExpires as number) * 1000`
- If token hasn't expired yet (by timestamp), NextAuth returns existing token
- **Problem**: Token is invalid in Keycloak, but NextAuth doesn't know yet
4. **User Navigates to Iframe Application**
- `ResponsiveIframe` component mounts
- `useEffect` triggers: `refreshSession()`
- Calls: `GET /api/auth/refresh-keycloak-session`
5. **Refresh Endpoint Execution**
```
GET /api/auth/refresh-keycloak-session
→ getServerSession(authOptions)
→ Reads NextAuth JWT from cookie
→ JWT contains old refreshToken (invalid)
→ Calls Keycloak: POST /token with old refreshToken
→ Keycloak responds: { error: 'invalid_grant', error_description: 'Token is not active' }
→ Returns 401 with SessionInvalidated error
```
6. **ResponsiveIframe Handles Error**
- Detects `SessionInvalidated` error
- Redirects to `/signin`
- User signs in again
- Gets NEW tokens from Keycloak
7. **User Returns to Iframe (After Re-login)**
- **Problem**: If NextAuth JWT callback hasn't run yet, it might still have old tokens
- OR: The new session is created, but iframe component might be using cached session
- OR: The refresh endpoint is called again before new session is fully established
## Root Cause Analysis
### Issue 1: Stale Token Detection
**Problem**: NextAuth only tries to refresh tokens when they're expired (by timestamp). If a token is invalidated externally (Keycloak logout), NextAuth won't know until it tries to refresh.
**Current Flow**:
```
JWT Callback:
if (Date.now() < token.accessTokenExpires * 1000) {
return token; // Returns stale token without checking Keycloak
}
// Only refreshes if expired by timestamp
```
**What Should Happen**:
- When accessing iframe, we proactively refresh to validate token
- But if refresh fails, we need to clear the NextAuth session immediately
### Issue 2: Session Invalidation Timing
**Problem**: When refresh fails:
1. Refresh endpoint returns `SessionInvalidated`
2. ResponsiveIframe redirects to `/signin`
3. User signs in, gets new tokens
4. **But**: NextAuth JWT might still have old tokens cached until next JWT callback execution
**Current Behavior**:
- Redirect to signin happens
- User re-authenticates
- New session is created
- But old session might still be in browser cache/cookies
### Issue 3: Infinite Redirect Loop Potential
**Problem**: If the refresh endpoint keeps failing:
- ResponsiveIframe redirects to `/signin`
- User signs in
- Returns to iframe
- Refresh endpoint called again
- If new session isn't fully established, it might still use old tokens
- Loop continues
## Current Code Flow
### ResponsiveIframe Component Flow
```typescript
1. Component mounts with session
2. useEffect triggers refreshSession()
3. Calls GET /api/auth/refresh-keycloak-session
4. If 401 + SessionInvalidated:
→ window.location.href = '/signin'
→ User redirected
5. User signs in again
6. Returns to iframe page
7. Component mounts again
8. useEffect triggers refreshSession() again
9. If session still has old tokens → fails again
```
### Refresh Endpoint Flow
```typescript
GET /api/auth/refresh-keycloak-session
1. getServerSession(authOptions)
→ Reads JWT from cookie
→ JWT callback runs
→ If token expired: refreshAccessToken()
→ If token not expired: returns existing token (might be invalid!)
2. Uses session.refreshToken
3. Calls Keycloak refresh endpoint
4. If invalid_grant: Returns SessionInvalidated
```
### JWT Callback Flow
```typescript
async jwt({ token, account, profile }) {
// Initial login: account & profile present
if (account && profile) {
// Store tokens
}
// Subsequent requests
else if (token.accessToken) {
// Check expiration
if (Date.now() < token.accessTokenExpires * 1000) {
return token; // ⚠️ Returns token without validating with Keycloak
}
// Only refreshes if expired by timestamp
return refreshAccessToken(token);
}
}
```
## The Problem
**Key Issue**: NextAuth JWT callback only checks token expiration by timestamp. It doesn't validate that the token is still valid in Keycloak. So:
1. User logs out from Keycloak → Token invalidated
2. NextAuth JWT still has token (not expired by timestamp)
3. JWT callback returns existing token (assumes it's valid)
4. Refresh endpoint tries to use invalid refresh token
5. Fails, redirects to signin
6. User signs in, but if JWT callback hasn't run with new account, might still have old token
## Why It Gets Stuck
Looking at the logs:
```
Failed to refresh Keycloak session: { error: 'invalid_grant', error_description: 'Token is not active' }
GET /api/auth/refresh-keycloak-session 401
→ Redirects to /signin
→ User signs in
→ Returns to iframe
→ refresh-keycloak-session called again
→ Still fails (401)
```
**Possible reasons**:
1. **Session not fully updated**: After signin, NextAuth creates new session, but refresh endpoint might be reading old session from cookie before it's updated
2. **Token not refreshed in JWT**: The new tokens from signin might not be stored in JWT yet when refresh endpoint is called
3. **Cookie caching**: Browser might be sending old session cookie
4. **Race condition**: Refresh endpoint called before new session is established
## Recommendations (Without Code Changes)
### 1. Check Session State After Signin
After user signs in and is redirected back:
- Verify that `getServerSession()` returns new session with valid tokens
- Check that JWT callback has run and stored new tokens
- Ensure session cookie is updated in browser
### 2. Add Delay/Retry Logic
In ResponsiveIframe:
- After redirect from signin, wait a moment before calling refresh endpoint
- Or check if session has been updated before calling refresh
- Add retry logic with exponential backoff
### 3. Validate Token Before Using
In refresh endpoint:
- Before using refreshToken, validate that accessToken is still valid
- Or check token age - if token is old, force refresh even if not expired
### 4. Clear Session on Invalid Token
When refresh fails with invalid_grant:
- Don't just redirect - also clear NextAuth session cookie
- Force complete re-authentication
- Ensure old session is completely removed
### 5. Check Keycloak Session Status
Before calling refresh endpoint:
- Check if Keycloak session is still active
- Use Keycloak's userinfo endpoint to validate access token
- Only refresh if token is actually invalid
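Recommendations 3 and 5 boil down to asking Keycloak whether the access token is still active before trusting it. One minimal way to do that (a sketch only, using the standard OIDC userinfo endpoint) is:
```typescript
// Sketch of the proactive check suggested above: ask Keycloak's userinfo
// endpoint whether the access token is still accepted before using it.
async function isAccessTokenActive(accessToken: string): Promise<boolean> {
  const response = await fetch(
    `${process.env.KEYCLOAK_ISSUER}/protocol/openid-connect/userinfo`,
    { headers: { Authorization: `Bearer ${accessToken}` } }
  );
  // A 401 means the token was revoked or the SSO session ended externally.
  return response.ok;
}
```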
## Current Behavior Summary
**What's Happening**:
1. ✅ User logs out from Keycloak → Keycloak invalidates tokens
2. ✅ User accesses dashboard → NextAuth still has old tokens (not expired by timestamp)
3. ✅ User goes to iframe → Refresh endpoint called
4. ✅ Refresh fails → Detects invalid token
5. ✅ Redirects to signin → User re-authenticates
6. ⚠️ **Issue 1**: Storage initialization fails during signin (`createUserFolderStructure` not exported)
7. ⚠️ **Issue 2**: After re-authentication, refresh endpoint might still be using old session
8. ⚠️ **Result**: Gets stuck in redirect loop or keeps failing
**Root Cause**: NextAuth doesn't proactively validate tokens with Keycloak. It only checks expiration timestamps. When tokens are invalidated externally, NextAuth doesn't know until it tries to use them.
**Additional Issue Confirmed**:
- Storage initialization fails during signin process
- Error: `createUserFolderStructure is not a function`
- This prevents complete signin initialization
- May contribute to session not being fully established
---
**Analysis Date**: 2024
**Status**: Issue Identified
**Next Steps**: Implement proactive token validation or improve session invalidation handling
View File
@ -1,207 +0,0 @@
# SSO Flow Analysis - Confirmed Issues
## Browser Console Evidence
Based on the browser console error provided, here's what's happening:
### Confirmed Flow
1. **User logs out from Keycloak directly** (external application)
- Keycloak invalidates all tokens and session
2. **User accesses Dashboard**
- NextAuth session still exists (30-day expiration)
- JWT contains old, now-invalid tokens
- Token expiration check: `Date.now() < token.accessTokenExpires * 1000`
- If token hasn't expired by timestamp → JWT callback returns old token
- **Problem**: Token is invalid in Keycloak, but NextAuth doesn't validate it
3. **User navigates to iframe application**
- `ResponsiveIframe` component mounts
- Calls `GET /api/auth/refresh-keycloak-session`
- Refresh endpoint uses old `refreshToken` from session
- Keycloak responds: `{ error: 'invalid_grant', error_description: 'Token is not active' }`
- Returns 401 with `SessionInvalidated` error
4. **Redirect to Signin**
- `ResponsiveIframe` detects `SessionInvalidated`
- Redirects: `window.location.href = '/signin'`
- User lands on signin page
5. **Signin Process Starts**
- `app/signin/page.tsx` detects unauthenticated status
- Triggers: `signIn("keycloak", { callbackUrl: "/" })`
- User authenticates with Keycloak
- Gets NEW tokens from Keycloak
- NextAuth callback stores new tokens in JWT
6. **Storage Initialization Fails** ⚠️ **CONFIRMED ISSUE**
- Signin page detects session available
- Calls: `POST /api/storage/init`
- Storage endpoint tries to call: `createUserFolderStructure(session.user.id)`
- **Error**: `createUserFolderStructure is not a function`
- Storage initialization fails
- Signin page shows "Échec de l'initialisation" ("Initialization failed")
- **Impact**: User might not be fully signed in, or session might not be complete
7. **User Returns to Iframe** (After Signin)
- Navigates to iframe application again
- `ResponsiveIframe` component mounts
- Calls refresh endpoint again
- **If storage init failed**: Session might not be fully established
- **If new session not ready**: Might still use old tokens
- Refresh fails again → Redirects to signin → Loop
## Confirmed Issues
### Issue 1: Storage Initialization Failure ✅ CONFIRMED
**Error**: `createUserFolderStructure is not a function`
**Location**: `app/api/storage/init/route.ts:16`
**Impact on SSO Flow**:
- Storage initialization is part of signin process
- If it fails, signin might not complete properly
- Session might not be fully established
- When user tries to access iframe, refresh endpoint might fail because:
- Session not complete
- Or still using old tokens if new session wasn't saved
**Evidence from Browser**:
```
Failed to initialize storage: "{\"error\":\"Failed to initialize storage\",\"details\":\"(0 , _lib_s3__WEBPACK_IMPORTED_MODULE_3__.createUserFolderStructure) is not a function\"}"
```
### Issue 2: Stale Token in NextAuth JWT ✅ CONFIRMED
**Problem**: When Keycloak session is invalidated externally:
- NextAuth JWT still contains old tokens
- JWT callback only checks expiration timestamp
- Doesn't validate with Keycloak that token is still valid
- Returns stale token until expiration timestamp is reached
**Evidence from Terminal**:
```
Failed to refresh Keycloak session: { error: 'invalid_grant', error_description: 'Token is not active' }
GET /api/auth/refresh-keycloak-session 401
```
### Issue 3: Race Condition After Re-authentication ✅ CONFIRMED
**Problem**: After user signs in again:
- New session is created
- But refresh endpoint might be called before:
- JWT callback has run with new account
- New tokens are stored in JWT
- Session cookie is updated in browser
- Result: Refresh endpoint still uses old tokens
**Evidence**: Multiple failed refresh attempts after signin
## Complete Flow Diagram (Confirmed)
```
1. User logs out from Keycloak (external)
2. Keycloak invalidates:
- Session cookies ✅
- Refresh token ✅
- Access token ✅
3. User accesses Dashboard
- NextAuth JWT has old tokens (not expired by timestamp)
- JWT callback returns old token (doesn't validate with Keycloak)
4. User navigates to iframe
- ResponsiveIframe calls refresh endpoint
- Uses old refreshToken from session
5. Keycloak rejects: "Token is not active"
6. Refresh endpoint returns 401 SessionInvalidated
7. Redirect to /signin
8. User authenticates with Keycloak
- Gets NEW tokens
- NextAuth stores new tokens in JWT
9. Storage initialization called
- ⚠️ FAILS: createUserFolderStructure not found
- Signin process incomplete
10. User navigates to iframe again
- Refresh endpoint called
- ⚠️ Might still use old tokens (if new session not ready)
- OR: Session incomplete due to storage init failure
- Fails again → Redirects to signin
11. LOOP or stuck state
```
## Root Causes (Confirmed)
1. **No Proactive Token Validation**
- NextAuth only checks expiration timestamps
- Doesn't validate tokens with Keycloak
- Stale tokens remain in JWT until timestamp expiration
2. **Storage Initialization Failure**
- Missing function: `createUserFolderStructure`
- Prevents complete signin initialization
- May cause session to be incomplete
3. **Race Condition**
- Refresh endpoint called before new session fully established
- Browser might send old session cookie
- JWT callback might not have run yet with new account
4. **No Session Invalidation on External Logout**
- When Keycloak session invalidated externally
- NextAuth doesn't know about it
- Continues using invalid tokens
## Impact on User Experience
**What User Sees**:
1. Logs out from Keycloak (external app)
2. Accesses Dashboard → Still logged in (NextAuth session valid)
3. Tries to access iframe application
4. Gets redirected to signin
5. Signs in again
6. Storage initialization fails (error message)
7. Tries to access iframe again
8. Gets redirected to signin again
9. **Stuck in loop or keeps getting disconnected**
## Recommendations
### Immediate Fixes Needed
1. **Fix Storage Initialization**
- Export `createUserFolderStructure` from `lib/s3.ts`
- Or remove storage init from signin flow if not critical
- Prevents signin from failing
2. **Proactive Token Validation**
- Before using tokens, validate with Keycloak
- Use Keycloak's userinfo endpoint to check token validity
- Clear session if token invalid
3. **Session Invalidation on Refresh Failure**
- When refresh fails with invalid_grant
- Immediately clear NextAuth session cookie
- Force complete re-authentication
4. **Delay Refresh After Signin**
- After redirect from signin, wait for session to be established
- Check session status before calling refresh endpoint
- Add retry logic with backoff
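A sketch of the delay/retry idea in point 4, using the `/api/auth/refresh-keycloak-session` endpoint already present in the codebase (the attempt count and delays are arbitrary):
```typescript
// Give the new session a moment to be established before (re)calling the
// refresh endpoint, instead of failing immediately after the signin redirect.
async function refreshWithBackoff(maxAttempts = 3): Promise<boolean> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    // Wait 0s, 1s, 2s, ... before each attempt.
    await new Promise((resolve) => setTimeout(resolve, attempt * 1000));
    const res = await fetch("/api/auth/refresh-keycloak-session", {
      credentials: "include",
    });
    if (res.ok) return true;
    // 401 SessionInvalidated: retry once the new session cookie is in place.
  }
  return false;
}
```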
---
**Analysis Date**: 2024
**Status**: Issues Confirmed
**Evidence**: Browser console + Terminal logs
View File
@ -1,540 +0,0 @@
# Stack Quality & Flow Analysis Report
## Executive Summary
This document provides a comprehensive analysis of the codebase quality, architecture patterns, and identifies critical issues in the notification and widget update flows.
**Overall Assessment**: ⚠️ **Moderate Quality** - Good foundation with several critical issues that need attention.
---
## 🔴 Critical Issues
### 1. **Memory Leak: Multiple Polling Intervals**
**Location**: `hooks/use-notifications.ts`, `components/parole.tsx`, `components/calendar/calendar-widget.tsx`
**Problem**:
- `useNotifications` hook creates polling intervals that may not be properly cleaned up
- Multiple components using the hook can create duplicate intervals
- `startPolling()` returns a cleanup function but it's not properly used in the useEffect
**Code Issue**:
```typescript
// Line 226 in use-notifications.ts
return () => stopPolling(); // This return is inside startPolling, not useEffect!
```
**Impact**: Memory leaks, excessive API calls, degraded performance
**Fix Required**:
```typescript
useEffect(() => {
isMountedRef.current = true;
if (status === 'authenticated' && session?.user) {
fetchNotificationCount(true);
fetchNotifications();
startPolling();
}
return () => {
isMountedRef.current = false;
stopPolling(); // ✅ Correct placement
};
}, [status, session?.user, fetchNotificationCount, fetchNotifications, startPolling, stopPolling]);
```
---
### 2. **Race Condition: Notification Badge Double Fetching**
**Location**: `components/notification-badge.tsx`
**Problem**:
- Multiple `useEffect` hooks trigger `manualFetch()` simultaneously
- Lines 65-70, 82-87, and 92-99 all trigger fetches
- No debouncing or request deduplication
**Code Issue**:
```typescript
// Line 65-70: Fetch on dropdown open
useEffect(() => {
if (isOpen && status === 'authenticated') {
manualFetch();
}
}, [isOpen, status]);
// Line 82-87: Fetch on mount
useEffect(() => {
if (status === 'authenticated') {
manualFetch();
}
}, [status]);
// Line 92-99: Fetch on handleOpenChange
const handleOpenChange = (open: boolean) => {
setIsOpen(open);
if (open && status === 'authenticated') {
manualFetch(); // Duplicate fetch!
}
};
```
**Impact**: Unnecessary API calls, potential race conditions, poor UX
**Fix Required**: Consolidate fetch logic, add request deduplication
---
### 3. **Redis KEYS Command Performance Issue**
**Location**: `lib/services/notifications/notification-service.ts` (line 293)
**Problem**:
- Using `redis.keys()` which is O(N) and blocks Redis
- Can cause performance degradation in production
**Code Issue**:
```typescript
// Line 293 - BAD
const listKeys = await redis.keys(listKeysPattern);
if (listKeys.length > 0) {
await redis.del(...listKeys);
}
```
**Impact**: Redis blocking, slow response times, potential timeouts
**Fix Required**: Use `SCAN` instead of `KEYS`:
```typescript
// GOOD - Use SCAN
let cursor = '0';
do {
const [nextCursor, keys] = await redis.scan(cursor, 'MATCH', listKeysPattern, 'COUNT', 100);
cursor = nextCursor;
if (keys.length > 0) {
await redis.del(...keys);
}
} while (cursor !== '0');
```
---
### 4. **Infinite Loop Risk: useEffect Dependencies**
**Location**: `hooks/use-notifications.ts` (line 255)
**Problem**:
- `useEffect` includes functions in dependencies that are recreated on every render
- `fetchNotificationCount`, `fetchNotifications`, `startPolling`, `stopPolling` are in deps
- These functions depend on `session?.user` which changes, causing re-renders
**Code Issue**:
```typescript
useEffect(() => {
// ...
}, [status, session?.user, fetchNotificationCount, fetchNotifications, startPolling, stopPolling]);
// ❌ Functions are recreated, causing infinite loops
```
**Impact**: Infinite re-renders, excessive API calls, browser freezing
**Fix Required**: Remove function dependencies or use `useCallback` properly
---
### 5. **Background Refresh Memory Leak**
**Location**: `lib/services/notifications/notification-service.ts` (line 326)
**Problem**:
- `setTimeout` in `scheduleBackgroundRefresh` creates closures that may not be cleaned up
- No way to cancel pending background refreshes
- Can accumulate in serverless environments
**Code Issue**:
```typescript
setTimeout(async () => {
// This closure holds references and may not be garbage collected
await this.getNotificationCount(userId);
await this.getNotifications(userId, 1, 20);
}, 0);
```
**Impact**: Memory leaks, especially in serverless/edge environments
**Fix Required**: Use proper cleanup mechanism or job queue
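One possible cleanup mechanism (illustrative only, not the service's current design) is to keep a handle on each scheduled refresh so it can be cancelled and never accumulates:
```typescript
// Sketch of a cancellable background-refresh scheduler; names are illustrative.
class BackgroundRefreshScheduler {
  private pending = new Map<string, NodeJS.Timeout>();

  schedule(userId: string, refresh: () => Promise<void>, delayMs = 0): void {
    // Cancel any refresh already queued for this user to avoid accumulation.
    this.cancel(userId);
    const handle = setTimeout(async () => {
      try {
        await refresh();
      } finally {
        this.pending.delete(userId);
      }
    }, delayMs);
    this.pending.set(userId, handle);
  }

  cancel(userId: string): void {
    const handle = this.pending.get(userId);
    if (handle) {
      clearTimeout(handle);
      this.pending.delete(userId);
    }
  }
}
```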
---
## ⚠️ High Priority Issues
### 6. **Widget Update Race Conditions**
**Location**: Multiple widget components
**Problem**:
- Widgets don't coordinate updates
- Multiple widgets can trigger simultaneous API calls
- No request deduplication
**Affected Widgets**:
- `components/calendar.tsx` - Auto-refresh every 5 minutes
- `components/parole.tsx` - Auto-polling every 30 seconds
- `components/news.tsx` - Manual refresh only
- `components/flow.tsx` - Manual refresh only
- `components/email.tsx` - Manual refresh only
**Impact**: Unnecessary load on backend, potential rate limiting
**Fix Required**: Implement request deduplication layer or use React Query/SWR
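A minimal request-deduplication layer can be as small as sharing one in-flight promise per cache key, as in this sketch (React Query and SWR provide the same behavior out of the box):
```typescript
// Concurrent callers asking for the same key share one in-flight promise
// instead of issuing duplicate requests.
const inflight = new Map<string, Promise<unknown>>();

export function dedupedFetch<T>(key: string, fn: () => Promise<T>): Promise<T> {
  const existing = inflight.get(key);
  if (existing) return existing as Promise<T>;
  const promise = fn().finally(() => inflight.delete(key));
  inflight.set(key, promise);
  return promise;
}
```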
---
### 7. **Redis Connection Singleton Issues**
**Location**: `lib/redis.ts`
**Problem**:
- Singleton pattern but no proper connection pooling
- In serverless environments, connections may not be reused
- No connection health monitoring
- Race condition in `getRedisClient()` when `isConnecting` is true
**Code Issue**:
```typescript
if (isConnecting) {
if (redisClient) return redisClient;
// ⚠️ What if redisClient is null but isConnecting is true?
console.warn('Redis connection in progress, creating temporary client');
}
```
**Impact**: Connection leaks, connection pool exhaustion, degraded performance
**Fix Required**: Implement proper connection pool or use Redis connection manager
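A sketch of one way to close that race, assuming `ioredis` (which the `redis.scan(...)` call signature used earlier in this document suggests): share a single connect promise so concurrent callers never observe "connecting" without a client.
```typescript
// Illustrative singleton with a shared connect promise; ioredis is assumed
// and may not match the project's actual Redis client.
import Redis from "ioredis";

let client: Redis | null = null;
let connecting: Promise<Redis> | null = null;

export async function getRedisClient(): Promise<Redis> {
  if (client && client.status === "ready") return client;
  if (!connecting) {
    connecting = (async () => {
      const instance = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379", {
        lazyConnect: true, // connect explicitly below so failures surface here
        maxRetriesPerRequest: 2,
      });
      await instance.connect();
      client = instance;
      return instance;
    })().finally(() => {
      connecting = null;
    });
  }
  return connecting;
}
```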
---
### 8. **Error Handling Gaps**
**Location**: Multiple files
**Problems**:
- Errors are logged but not always handled gracefully
- No retry logic for transient failures
- No circuit breaker pattern
- Widgets show errors but don't recover automatically
**Examples**:
- `components/notification-badge.tsx` - Shows error but no auto-retry
- `lib/services/notifications/notification-service.ts` - Errors return empty arrays silently
- Widget components - Errors stop updates, no recovery
**Impact**: Poor UX, silent failures, degraded functionality
---
### 9. **Cache Invalidation Issues**
**Location**: `lib/services/notifications/notification-service.ts`
**Problem**:
- Cache invalidation uses `KEYS` command (blocking)
- No partial cache invalidation
- Background refresh may not invalidate properly
- Race condition: cache can be invalidated while being refreshed
**Impact**: Stale data, inconsistent state
---
### 10. **Excessive Logging**
**Location**: Throughout codebase
**Problem**:
- Console.log statements everywhere
- No log levels
- Production code has debug logs
- Performance impact from string concatenation
**Impact**: Performance degradation, log storage costs, security concerns
**Fix Required**: Use proper logging library with levels (e.g., Winston, Pino)
---
## 📊 Architecture Quality Assessment
### Strengths ✅
1. **Adapter Pattern**: Well-implemented notification adapter pattern
2. **Separation of Concerns**: Clear separation between services, hooks, and components
3. **Type Safety**: Good TypeScript usage
4. **Caching Strategy**: Redis caching implemented
5. **Error Boundaries**: Some error handling present
### Weaknesses ❌
1. **No State Management**: Using local state instead of global state management
2. **No Request Deduplication**: Multiple components can trigger same API calls
3. **No Request Cancellation**: No way to cancel in-flight requests
4. **No Optimistic Updates**: UI doesn't update optimistically
5. **No Offline Support**: No handling for offline scenarios
6. **No Request Queue**: No queuing mechanism for API calls
---
## 🔄 Flow Analysis
### Notification Flow Issues
#### Flow Diagram (Current - Problematic):
```
User Action / Polling
useNotifications Hook (multiple instances)
Multiple API Calls (no deduplication)
NotificationService (Redis cache check)
Adapter Calls (parallel, but no error aggregation)
Response (may be stale due to race conditions)
```
#### Issues:
1. **Multiple Hook Instances**: `NotificationBadge` and potentially other components use `useNotifications`, creating multiple polling intervals
2. **No Request Deduplication**: Same request can be made multiple times simultaneously
3. **Cache Race Conditions**: Background refresh can conflict with user requests
4. **No Request Cancellation**: Old requests aren't cancelled when new ones start
### Widget Update Flow Issues
#### Flow Diagram (Current - Problematic):
```
Component Mount
useEffect triggers fetch
API Call (no coordination with other widgets)
State Update (may cause unnecessary re-renders)
Auto-refresh interval (no cleanup guarantee)
```
#### Issues:
1. **No Coordination**: Widgets don't know about each other's updates
2. **Duplicate Requests**: Same data fetched multiple times
3. **Cleanup Issues**: Intervals may not be cleaned up properly
4. **No Stale-While-Revalidate**: No background updates
---
## 🎯 Recommendations
### Immediate Actions (Critical)
1. **Fix Memory Leaks**
- Fix `useNotifications` cleanup
- Ensure all intervals are cleared
- Add cleanup in all widget components
2. **Fix Race Conditions**
- Implement request deduplication
- Fix notification badge double fetching
- Add request cancellation
3. **Fix Redis Performance**
- Replace `KEYS` with `SCAN`
- Implement proper connection pooling
- Add connection health checks
### Short-term Improvements (High Priority)
1. **Implement Request Management**
- Use React Query or SWR for request deduplication
- Implement request cancellation
- Add request queuing
2. **Improve Error Handling**
- Add retry logic with exponential backoff
- Implement circuit breaker pattern
- Add error boundaries
3. **Optimize Caching**
- Implement stale-while-revalidate pattern
- Add cache versioning
- Improve cache invalidation strategy
### Long-term Improvements (Medium Priority)
1. **State Management**
- Consider Zustand or Redux for global state
- Centralize notification state
- Implement optimistic updates
2. **Monitoring & Observability**
- Add proper logging (Winston/Pino)
- Implement metrics collection
- Add performance monitoring
3. **Testing**
- Add unit tests for hooks
- Add integration tests for flows
- Add E2E tests for critical paths
---
## 📈 Performance Metrics (Estimated)
### Current Performance Issues:
1. **API Calls**:
- Estimated 2-3x more calls than necessary due to race conditions
- No request deduplication
2. **Memory Usage**:
- Potential memory leaks from uncleaned intervals
- Closures holding references
3. **Redis Performance**:
- `KEYS` command can block for seconds with many keys
- No connection pooling
4. **Bundle Size**:
- Excessive logging increases bundle size
- No code splitting for widgets
---
## 🔍 Code Quality Metrics
### Code Smells Found:
1. **Long Functions**: Some functions exceed 50 lines
2. **High Cyclomatic Complexity**: `useNotifications` hook has high complexity
3. **Duplicate Code**: Similar fetch patterns across widgets
4. **Magic Numbers**: Hardcoded intervals (300000, 60000, etc.)
5. **Inconsistent Error Handling**: Different error handling patterns
### Technical Debt:
- **Estimated**: Medium-High
- **Areas**:
- Memory management
- Request management
- Error handling
- Caching strategy
- Logging infrastructure
---
## 🛠️ Specific Code Fixes Needed
### Fix 1: useNotifications Hook Cleanup
```typescript
// BEFORE (Current - Problematic)
useEffect(() => {
isMountedRef.current = true;
if (status === 'authenticated' && session?.user) {
fetchNotificationCount(true);
fetchNotifications();
startPolling();
}
return () => {
isMountedRef.current = false;
stopPolling();
};
}, [status, session?.user, fetchNotificationCount, fetchNotifications, startPolling, stopPolling]);
// AFTER (Fixed)
useEffect(() => {
if (status !== 'authenticated' || !session?.user) return;
isMountedRef.current = true;
// Initial fetch
fetchNotificationCount(true);
fetchNotifications();
// Start polling
const intervalId = setInterval(() => {
if (isMountedRef.current) {
debouncedFetchCount();
}
}, POLLING_INTERVAL);
// Cleanup
return () => {
isMountedRef.current = false;
clearInterval(intervalId);
};
}, [status, session?.user?.id]); // Only depend on primitive values
```
### Fix 2: Notification Badge Deduplication
```typescript
// Add request deduplication
const fetchInProgressRef = useRef(false);
const manualFetch = async () => {
if (fetchInProgressRef.current) {
console.log('[NOTIFICATION_BADGE] Fetch already in progress, skipping');
return;
}
fetchInProgressRef.current = true;
try {
await fetchNotifications(1, 10);
} finally {
fetchInProgressRef.current = false;
}
};
```
### Fix 3: Redis SCAN Instead of KEYS
```typescript
// BEFORE
const listKeys = await redis.keys(listKeysPattern);
// AFTER
const listKeys: string[] = [];
let cursor = '0';
do {
const [nextCursor, keys] = await redis.scan(cursor, 'MATCH', listKeysPattern, 'COUNT', 100);
cursor = nextCursor;
listKeys.push(...keys);
} while (cursor !== '0');
```
---
## 📝 Conclusion
The codebase has a solid foundation with good architectural patterns (adapter pattern, separation of concerns), but suffers from several critical issues:
1. **Memory leaks** from improper cleanup
2. **Race conditions** from lack of request coordination
3. **Performance issues** from blocking Redis operations
4. **Error handling gaps** that degrade UX
**Priority**: Fix critical issues immediately, then implement improvements incrementally.
**Estimated Effort**:
- Critical fixes: 2-3 days
- High priority improvements: 1-2 weeks
- Long-term improvements: 1-2 months
---
*Generated: Comprehensive codebase analysis*

View File

@ -1,302 +0,0 @@
# Unified Refresh System - Implementation Summary
## ✅ What Has Been Created
### Core Infrastructure Files
1. **`lib/constants/refresh-intervals.ts`**
- Standardized refresh intervals for all resources
- Helper functions for interval management
- All intervals harmonized and documented
2. **`lib/utils/request-deduplication.ts`** (sketched after this list)
- Request deduplication utility
- Prevents duplicate API calls within 5 seconds
- Automatic cleanup of stale requests
3. **`lib/services/refresh-manager.ts`**
- Centralized refresh management
- Handles all refresh intervals
- Provides pause/resume functionality
- Prevents duplicate refreshes
4. **`hooks/use-unified-refresh.ts`**
- React hook for easy integration
- Automatic registration/cleanup
- Manual refresh support
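The deduplication utility (item 2, sketched here as promised) boils down to caching in-flight promises. The `dedupe` method name is an assumption, while `getPendingCount()` matches the debug example later in this document:
```typescript
// Sketch of the deduplication idea; the real lib/utils/request-deduplication.ts may differ in API.
class RequestDeduplicator {
  private pending = new Map<string, { promise: Promise<unknown>; startedAt: number }>();

  constructor(private ttlMs = 5_000) {}

  async dedupe<T>(key: string, fetcher: () => Promise<T>): Promise<T> {
    const existing = this.pending.get(key);
    // Reuse an in-flight request started within the 5-second window
    if (existing && Date.now() - existing.startedAt < this.ttlMs) {
      return existing.promise as Promise<T>;
    }
    const promise = fetcher().finally(() => this.pending.delete(key));
    this.pending.set(key, { promise, startedAt: Date.now() });
    return promise;
  }

  getPendingCount(): number {
    return this.pending.size;
  }
}

export const requestDeduplicator = new RequestDeduplicator();
```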
### Documentation Files
1. **`IMPLEMENTATION_PLAN_UNIFIED_REFRESH.md`**
- Complete architecture overview
- Detailed implementation guide
- Code examples for all widgets
2. **`IMPLEMENTATION_CHECKLIST.md`**
- Step-by-step checklist
- Daily progress tracking
- Success criteria
---
## 🎯 Next Steps
### Immediate Actions (Start Here)
#### 1. Fix Critical Memory Leaks (30 minutes)
**File**: `lib/services/notifications/notification-service.ts`
Replace `redis.keys()` with `redis.scan()`:
```typescript
// Line 293 - BEFORE
const listKeys = await redis.keys(listKeysPattern);
// AFTER
const listKeys: string[] = [];
let cursor = '0';
do {
const [nextCursor, keys] = await redis.scan(
cursor,
'MATCH',
listKeysPattern,
'COUNT',
100
);
cursor = nextCursor;
if (keys.length > 0) {
listKeys.push(...keys);
}
} while (cursor !== '0');
```
---
#### 2. Test Core Infrastructure (1 hour)
Create a test file to verify everything works:
**File**: `lib/services/__tests__/refresh-manager.test.ts` (optional; a sketch follows the manual steps below)
Or test manually:
1. Import refresh manager in a component
2. Register a test resource
3. Verify it refreshes at correct interval
4. Verify cleanup on unmount
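A minimal Jest-style sketch for the optional test file; the `register`, `start`, and `stop` signatures are assumptions based on the troubleshooting notes further down, so adapt them to the real API:
```typescript
// Sketch only: register/start/stop signatures are assumed; adjust to the real refresh manager API.
import { refreshManager } from '@/lib/services/refresh-manager';

describe('refreshManager', () => {
  beforeEach(() => jest.useFakeTimers());
  afterEach(() => jest.useRealTimers());

  it('refreshes a registered resource at its interval', () => {
    const callback = jest.fn().mockResolvedValue(undefined);
    refreshManager.register('test-resource', callback, 1_000);
    refreshManager.start();

    // After three intervals the callback should have fired
    jest.advanceTimersByTime(3_000);
    expect(callback).toHaveBeenCalled();

    refreshManager.stop();
  });
});
```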
---
#### 3. Refactor Notifications (2-3 hours)
**File**: `hooks/use-notifications.ts`
Key changes:
- Remove manual polling logic
- Use `useUnifiedRefresh` hook
- Add `requestDeduplicator` for API calls
- Fix useEffect dependencies
See `IMPLEMENTATION_PLAN_UNIFIED_REFRESH.md` Section 3.1 for full code.
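Pending the full code in Section 3.1, a rough sketch of the target shape; the hook options, the `dedupe` method, and the endpoint path are assumptions, not the final implementation:
```typescript
// Rough target shape; option names, dedupe(), and the endpoint path are assumptions.
import { useCallback, useState } from 'react';
import { useSession } from 'next-auth/react';
import { useUnifiedRefresh } from '@/hooks/use-unified-refresh';
import { requestDeduplicator } from '@/lib/utils/request-deduplication';
import { REFRESH_INTERVALS } from '@/lib/constants/refresh-intervals';

export function useNotifications() {
  const { status } = useSession();
  const [count, setCount] = useState(0);

  const fetchCount = useCallback(async () => {
    // Deduplicated: concurrent callers share a single in-flight request
    const data = await requestDeduplicator.dedupe('notifications:count', () =>
      fetch('/api/notifications/count').then((res) => res.json())
    );
    setCount(data.count ?? 0);
  }, []);

  // Registration and cleanup are handled by the hook, replacing the manual polling logic
  const { refresh } = useUnifiedRefresh({
    resource: 'notifications',
    interval: REFRESH_INTERVALS.NOTIFICATIONS,
    enabled: status === 'authenticated',
    onRefresh: fetchCount,
  });

  return { count, refresh };
}
```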
---
#### 4. Refactor Notification Badge (1 hour)
**File**: `components/notification-badge.tsx`
Key changes:
- Remove duplicate `useEffect` hooks
- Use the hook's `refresh` function for manual refresh (see the sketch after this list)
- Remove manual fetch logic
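A minimal sketch of the slimmed-down badge, assuming the hook shape from the previous step:
```tsx
// Minimal sketch; the hook's return shape is assumed from the notifications refactor above.
'use client';

import { useNotifications } from '@/hooks/use-notifications';

export function NotificationBadge() {
  const { count, refresh } = useNotifications();

  if (count === 0) return null;

  return (
    <button onClick={() => refresh()} aria-label="Refresh notifications">
      <span className="badge">{count}</span>
    </button>
  );
}
```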
---
#### 5. Refactor Navigation Bar Time (30 minutes)
**File**: `components/main-nav.tsx` + `components/main-nav-time.tsx` (new)
Key changes:
- Extract time display to separate component
- Use `useUnifiedRefresh` hook (1 second interval)
- Fix static time issue
See `IMPLEMENTATION_PLAN_UNIFIED_REFRESH.md` Section 3.7 for full code.
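A possible shape for the extracted component, with the same caveat that the `useUnifiedRefresh` options are assumed (Section 3.7 has the real code):
```tsx
// Sketch of the extracted component; useUnifiedRefresh options are assumed.
'use client';

import { useState } from 'react';
import { useUnifiedRefresh } from '@/hooks/use-unified-refresh';

export function MainNavTime() {
  const [now, setNow] = useState(() => new Date());

  // Re-render every second so the displayed time never goes stale
  useUnifiedRefresh({
    resource: 'nav-time',
    interval: 1_000,
    enabled: true,
    onRefresh: async () => setNow(new Date()),
  });

  return <time dateTime={now.toISOString()}>{now.toLocaleTimeString()}</time>;
}
```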
---
#### 6. Refactor Widgets (1 hour each)
Start with high-frequency widgets:
1. **Parole** (`components/parole.tsx`) - 30s interval
2. **Calendar** (`components/calendar.tsx`) - 5min interval
3. **News** (`components/news.tsx`) - 10min interval
4. **Email** (`components/email.tsx`) - 1min interval
5. **Duties** (`components/flow.tsx`) - 2min interval
See `IMPLEMENTATION_PLAN_UNIFIED_REFRESH.md` Section 3.2 for example code.
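As an illustration of the per-widget pattern, using Parole from the list above (the fetch URL and hook options are placeholders; Section 3.2 has the real example):
```tsx
// Per-widget pattern, illustrated with Parole; the fetch URL and options are placeholders.
'use client';

import { useCallback, useState } from 'react';
import { useUnifiedRefresh } from '@/hooks/use-unified-refresh';
import { requestDeduplicator } from '@/lib/utils/request-deduplication';

export function Parole() {
  const [text, setText] = useState<string | null>(null);

  const load = useCallback(async () => {
    const data = await requestDeduplicator.dedupe('parole', () =>
      fetch('/api/parole').then((res) => res.json())
    );
    setText(data.text ?? null);
  }, []);

  const { refresh } = useUnifiedRefresh({
    resource: 'parole',
    interval: 30_000, // 30s, matching the list above
    enabled: true,
    onRefresh: load,
  });

  return (
    <div>
      <p>{text ?? 'Loading…'}</p>
      <button onClick={() => refresh()}>Refresh</button>
    </div>
  );
}
```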
---
## 📊 Expected Results
### Before Implementation:
- ❌ 120-150 API calls/minute
- ❌ Memory leaks from uncleaned intervals
- ❌ Duplicate requests
- ❌ No coordination between widgets
### After Implementation:
- ✅ 40-50 API calls/minute (60-70% reduction)
- ✅ No memory leaks
- ✅ Request deduplication working
- ✅ Centralized refresh coordination
---
## 🔍 Testing Checklist
After each phase, verify:
- [ ] No console errors
- [ ] Widgets refresh at correct intervals
- [ ] Manual refresh buttons work
- [ ] No duplicate API calls (check Network tab)
- [ ] No memory leaks (check Memory tab)
- [ ] Cleanup on component unmount
- [ ] Multiple tabs don't cause issues
---
## 🚨 Important Notes
### Backward Compatibility
All new code is designed to be:
- ✅ Non-breaking (existing code keeps working)
- ✅ Migratable gradually (one widget at a time)
- ✅ Easy to roll back (old implementations are kept)
### Migration Strategy
1. **Phase 1**: Core infrastructure (DONE ✅)
2. **Phase 2**: Fix critical issues
3. **Phase 3**: Migrate notifications
4. **Phase 4**: Migrate widgets one by one
5. **Phase 5**: Remove old code
### Feature Flags (Optional)
If you want to toggle the new system:
```typescript
// In refresh manager
const USE_UNIFIED_REFRESH = process.env.NEXT_PUBLIC_USE_UNIFIED_REFRESH !== 'false';
if (USE_UNIFIED_REFRESH) {
// Use new system
} else {
// Use old system
}
```
---
## 📈 Performance Monitoring
### Metrics to Track
1. **API Call Count**
- Before: ~120-150/min
- Target: ~40-50/min
- Monitor in Network tab
2. **Memory Usage**
- Before: Growing over time
- Target: Stable
- Monitor in Memory tab
3. **Refresh Accuracy**
- Verify intervals are correct
- Check last refresh times
- Monitor refresh manager status
### Debug Tools
```typescript
// Get refresh manager status
const status = refreshManager.getStatus();
console.log('Refresh Manager Status:', status);
// Get pending requests
const pendingCount = requestDeduplicator.getPendingCount();
console.log('Pending Requests:', pendingCount);
```
---
## 🎓 Learning Resources
### Key Concepts
1. **Singleton Pattern**: the refresh manager is a singleton (see the sketch after these lists)
2. **Request Deduplication**: Prevents duplicate calls
3. **React Hooks**: Proper cleanup with useEffect
4. **Memory Management**: Clearing intervals and refs
### Code Patterns
- **useRef for callbacks**: Prevents dependency issues
- **Map for tracking**: Efficient resource management
- **Promise tracking**: Prevents duplicate requests
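These patterns come together in the refresh manager; a condensed sketch where `register`/`start`/`getStatus` match the debug and troubleshooting examples, while the internals are assumed:
```typescript
// Condensed sketch; register/start/getStatus match the examples in this document, internals are assumed.
type ManagedResource = {
  callback: () => Promise<void>;
  intervalMs: number;
  timer?: ReturnType<typeof setInterval>;
};

class RefreshManager {
  private static instance: RefreshManager;                 // Singleton pattern
  private resources = new Map<string, ManagedResource>();  // Map for tracking

  private constructor() {}

  static getInstance(): RefreshManager {
    if (!RefreshManager.instance) RefreshManager.instance = new RefreshManager();
    return RefreshManager.instance;
  }

  register(name: string, callback: () => Promise<void>, intervalMs: number): void {
    this.resources.set(name, { callback, intervalMs });
  }

  start(): void {
    for (const resource of this.resources.values()) {
      if (resource.timer) continue; // already running
      resource.timer = setInterval(() => void resource.callback(), resource.intervalMs);
    }
  }

  stop(): void {
    for (const resource of this.resources.values()) {
      if (resource.timer) clearInterval(resource.timer);
      resource.timer = undefined;
    }
  }

  getStatus(): string[] {
    return [...this.resources.keys()];
  }
}

export const refreshManager = RefreshManager.getInstance();
```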
---
## 🐛 Troubleshooting
### Issue: Widgets not refreshing
**Check**:
1. Is refresh manager started? (`refreshManager.start()`)
2. Is resource registered? (`refreshManager.getStatus()`)
3. Is user authenticated? (`status === 'authenticated'`)
### Issue: Duplicate API calls
**Check**:
1. Is request deduplication working? (`requestDeduplicator.getPendingCount()`)
2. Are multiple components using the same resource?
3. Is TTL too short?
### Issue: Memory leaks
**Check**:
1. Are intervals cleaned up? (check cleanup functions)
2. Are refs cleared? (`isMountedRef.current = false`)
3. Are pending requests cleared? (check cleanup)
---
## 📝 Next Session Goals
1. ✅ Core infrastructure created
2. ⏭️ Fix Redis KEYS → SCAN
3. ⏭️ Refactor notifications hook
4. ⏭️ Refactor notification badge
5. ⏭️ Refactor first widget (Parole)
---
## 🎉 Success!
Once all widgets are migrated:
- ✅ Unified refresh system
- ✅ 60%+ reduction in API calls
- ✅ No memory leaks
- ✅ Better user experience
- ✅ Easier maintenance
---
*Last Updated: Implementation Summary v1.0*

View File

@ -0,0 +1,139 @@
import { NextResponse } from 'next/server';
import { getServerSession } from 'next-auth';
import { authOptions } from "@/app/api/auth/options";
import { logger } from '@/lib/logger';
/**
* GET /api/missions/test-n8n-config
*
 * Test endpoint to verify the N8N configuration.
 * Helps diagnose connection issues between Next.js and N8N.
 *
 * Authentication: required (user session)
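 *
 * Example call (hypothetical local dev URL; an authenticated session cookie is required):
 *   curl -H "x-test-webhook: true" http://localhost:3000/api/missions/test-n8n-config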
*/
export async function GET(request: Request) {
try {
    // Check authentication
const session = await getServerSession(authOptions);
if (!session?.user) {
return NextResponse.json(
{ error: 'Unauthorized' },
{ status: 401 }
);
}
    // Read environment variables
const n8nApiKey = process.env.N8N_API_KEY;
const n8nWebhookUrl = process.env.N8N_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-created';
const n8nRollbackWebhookUrl = process.env.N8N_ROLLBACK_WEBHOOK_URL || 'https://brain.slm-lab.net/webhook/mission-rollback';
const missionApiUrl = process.env.NEXT_PUBLIC_API_URL || 'https://api.slm-lab.net/api';
const n8nDeleteWebhookUrl = process.env.N8N_DELETE_WEBHOOK_URL;
    // Build the response
    const config = {
      // Environment variables
environment: {
hasN8NApiKey: !!n8nApiKey,
n8nApiKeyLength: n8nApiKey?.length || 0,
n8nApiKeyPrefix: n8nApiKey ? `${n8nApiKey.substring(0, 4)}...` : 'none',
n8nWebhookUrl,
n8nRollbackWebhookUrl,
n8nDeleteWebhookUrl: n8nDeleteWebhookUrl || 'not configured',
missionApiUrl,
},
      // Constructed URLs
urls: {
webhookUrl: n8nWebhookUrl,
callbackUrl: `${missionApiUrl}/api/missions/mission-created`,
rollbackUrl: n8nRollbackWebhookUrl,
        deleteUrl: n8nDeleteWebhookUrl || 'not configured',
        // Declared up front so the later assignments to config.urls.webhookTest type-check
        webhookTest: undefined as Record<string, unknown> | undefined,
      },
      // Configuration status
status: {
configured: !!n8nApiKey && !!missionApiUrl,
missingApiKey: !n8nApiKey,
missingApiUrl: !missionApiUrl,
ready: !!n8nApiKey && !!missionApiUrl,
},
      // Recommendations
recommendations: [] as string[],
};
    // Add recommendations based on the configuration
if (!n8nApiKey) {
config.recommendations.push('❌ N8N_API_KEY n\'est pas défini. Ajoutez-le à vos variables d\'environnement.');
} else {
config.recommendations.push('✅ N8N_API_KEY est configuré');
}
if (!missionApiUrl) {
config.recommendations.push('⚠️ NEXT_PUBLIC_API_URL n\'est pas défini. Utilisation de la valeur par défaut.');
} else {
config.recommendations.push('✅ NEXT_PUBLIC_API_URL est configuré');
}
if (n8nApiKey && n8nApiKey.length < 10) {
config.recommendations.push('⚠️ N8N_API_KEY semble trop court. Vérifiez qu\'il est correct.');
}
    // Test N8N webhook connectivity (optional; can be slow)
const testWebhook = request.headers.get('x-test-webhook') === 'true';
if (testWebhook) {
try {
logger.debug('Testing N8N webhook connectivity', { url: n8nWebhookUrl });
const testResponse = await fetch(n8nWebhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'x-api-key': n8nApiKey || '',
},
body: JSON.stringify({ test: true }),
          signal: AbortSignal.timeout(5000), // 5-second timeout
});
config.urls.webhookTest = {
status: testResponse.status,
statusText: testResponse.statusText,
reachable: testResponse.status !== 0,
note: testResponse.status === 404
? 'Webhook non enregistré (workflow inactif?)'
: testResponse.status === 200 || testResponse.status === 400 || testResponse.status === 500
? 'Webhook actif (peut échouer avec des données de test)'
: 'Réponse inattendue',
};
} catch (error) {
config.urls.webhookTest = {
error: error instanceof Error ? error.message : 'Unknown error',
reachable: false,
note: 'Impossible de joindre le webhook N8N',
};
}
} else {
config.urls.webhookTest = {
note: 'Ajoutez le header "x-test-webhook: true" pour tester la connectivité',
};
}
return NextResponse.json({
success: true,
timestamp: new Date().toISOString(),
...config,
});
} catch (error) {
logger.error('Error in test-n8n-config endpoint', {
error: error instanceof Error ? error.message : String(error)
});
return NextResponse.json(
{
success: false,
error: 'Failed to check N8N configuration',
details: error instanceof Error ? error.message : 'Unknown error'
},
{ status: 500 }
);
}
}

1
log
View File

@ -1 +0,0 @@