n8n

n8n is a workflow automation platform integrated into the Local AI Cyber Lab for creating, managing, and executing workflows that connect the lab's AI and security components.

Architecture Overview

graph TB
    subgraph Frontend["Frontend"]
        editor["Workflow Editor"]
        dashboard["Dashboard"]
        executions["Execution History"]
    end

    subgraph Engine["Workflow Engine"]
        executor["Workflow Executor"]
        scheduler["Task Scheduler"]
        queue["Queue Manager"]

        subgraph Nodes["Node System"]
            triggers["Trigger Nodes"]
            actions["Action Nodes"]
            ai["AI Nodes"]
        end
    end

    subgraph Storage["Data Storage"]
        workflows["Workflow Storage"]
        credentials["Credentials Vault"]
        execdata["Execution Data"]

        subgraph Queue["Queue System"]
            active["Active Queue"]
            waiting["Waiting Queue"]
            failed["Failed Queue"]
        end
    end

    Frontend --> Engine
    Engine --> Storage
    Nodes --> executor

    classDef primary fill:#f9f,stroke:#333,stroke-width:2px
    classDef secondary fill:#bbf,stroke:#333,stroke-width:1px
    class editor,executor primary
    class triggers,actions secondary

Workflow Structure

graph LR
    subgraph Triggers["Trigger Nodes"]
        webhook["Webhook"]
        schedule["Schedule"]
        event["Event"]
    end

    subgraph Processing["Processing Nodes"]
        ai["AI Models"]
        transform["Data Transform"]
        filter["Filter"]
    end

    subgraph Actions["Action Nodes"]
        api["API Call"]
        notify["Notification"]
        database["Database"]
    end

    Triggers --> Processing
    Processing --> Actions

    classDef trigger fill:#f96,stroke:#333
    classDef process fill:#9af,stroke:#333
    classDef action fill:#9f9,stroke:#333

    class webhook,schedule,event trigger
    class ai,transform,filter process
    class api,notify,database action

Installation

n8n is included in the Local AI Cyber Lab. To pull the latest image and start the service manually:

# Update n8n
docker-compose pull n8n

# Start the service
docker-compose up -d n8n

Configuration

Environment Variables

# .env file
N8N_HOST=0.0.0.0
N8N_PORT=5678
N8N_PROTOCOL=http
N8N_USER_MANAGEMENT_DISABLED=false
N8N_BASIC_AUTH_ACTIVE=true
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=secure-password

Security Configuration

# docker-compose.yml
services:
  n8n:
    environment:
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      - N8N_JWT_SECRET=${N8N_JWT_SECRET}
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_HOST=db
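
Both values should be long, random secrets. As a minimal sketch (assuming the variables are read from the same .env file shown above), they can be generated with openssl:

# Generate random secrets and append them to the .env file
echo "N8N_ENCRYPTION_KEY=$(openssl rand -hex 32)" >> .env
echo "N8N_JWT_SECRET=$(openssl rand -hex 32)" >> .env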

Workflow Development

Basic Workflow

// Webhook to AI Model Workflow
{
  "nodes": [
    {
      "name": "Webhook",
      "type": "n8n-nodes-base.webhook",
      "parameters": {
        "path": "ai-request",
        "responseMode": "lastNode"
      }
    },
    {
      "name": "AI Model",
      "type": "n8n-nodes-base.httpRequest",
      "parameters": {
        "url": "http://ollama:11434/api/generate",
        "method": "POST",
        "body": {
          "model": "llama2",
          "prompt": "={{$json.prompt}}"
        }
      }
    },
    {
      "name": "Response",
      "type": "n8n-nodes-base.respond",
      "parameters": {
        "responseBody": "={{$json.response}}"
      }
    }
  ]
}
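
Once the workflow is saved and activated, it can be triggered over HTTP. A sample request, assuming the default port and n8n's production webhook URL prefix (use /webhook-test/ while testing from the editor):

# Trigger the webhook workflow defined above
curl -X POST http://localhost:5678/webhook/ai-request \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Summarize the latest security alert"}'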

Advanced Workflow

// AI Processing Pipeline
{
  "nodes": [
    {
      "name": "Schedule",
      "type": "n8n-nodes-base.cron",
      "parameters": {
        "triggerTimes": {
          "item": [
            {
              "mode": "everyX",
              "value": 5,
              "unit": "minutes"
            }
          ]
        }
      }
    },
    {
      "name": "Fetch Data",
      "type": "n8n-nodes-base.httpRequest",
      "parameters": {
        "url": "http://api.example.com/data",
        "method": "GET"
      }
    },
    {
      "name": "Process Text",
      "type": "n8n-nodes-base.function",
      "parameters": {
        "functionCode": "
          const items = input.map(item => {
            item.json.processed = item.json.text.toLowerCase();
            return item;
          });
          return items;
        "
      }
    },
    {
      "name": "AI Analysis",
      "type": "n8n-nodes-base.httpRequest",
      "parameters": {
        "url": "http://ai-guardian:8000/api/analyze",
        "method": "POST",
        "body": "={{$json}}"
      }
    }
  ]
}
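
Workflow definitions like this can also be imported from the command line instead of pasting them into the editor. A sketch using the n8n CLI inside the container (the file name and path are illustrative):

# Copy the workflow JSON into the container and import it
docker cp ai-pipeline.json "$(docker-compose ps -q n8n)":/tmp/ai-pipeline.json
docker-compose exec n8n n8n import:workflow --input=/tmp/ai-pipeline.json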

Node Development

Custom Node

import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from 'n8n-workflow';

export class CustomAINode implements INodeType {
    description: INodeTypeDescription = {
        displayName: 'Custom AI Node',
        name: 'customAINode',
        group: ['transform'],
        version: 1,
        description: 'Process data using custom AI model',
        defaults: {
            name: 'Custom AI',
            color: '#772244',
        },
        inputs: ['main'],
        outputs: ['main'],
        properties: [
            {
                displayName: 'Model',
                name: 'model',
                type: 'string',
                default: 'llama2',
                description: 'AI model to use',
            },
            {
                displayName: 'Input Field',
                name: 'inputField',
                type: 'string',
                default: 'data',
                description: 'Field containing input data',
            }
        ],
    };

    async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
        const items = this.getInputData();
        const model = this.getNodeParameter('model', 0) as string;
        const inputField = this.getNodeParameter('inputField', 0) as string;

        // Process items (processWithAI is a placeholder for a project-specific
        // helper that calls the model's HTTP API)
        const returnData = await Promise.all(
            items.map(async (item) => {
                const response = await processWithAI(item.json[inputField], model);
                return {
                    json: {
                        ...item.json,
                        aiResult: response,
                    },
                };
            })
        );

        return [returnData];
    }
}
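
To load a custom node into the containerized instance, one approach is to build the package and point n8n at a mounted directory via N8N_CUSTOM_EXTENSIONS (the directory layout below is an assumption; adjust it to your compose file):

# Build the custom node package on the host
npm run build

# In docker-compose.yml, mount the build output and register it, e.g.:
#   volumes:
#     - ./custom-nodes:/data/custom-nodes
#   environment:
#     - N8N_CUSTOM_EXTENSIONS=/data/custom-nodes

# Restart n8n so it picks up the new node
docker-compose up -d n8n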

Integration Examples

AI Model Integration

// AI Model Node Configuration
{
  "nodes": [
    {
      "name": "Ollama",
      "type": "n8n-nodes-base.httpRequest",
      "parameters": {
        "url": "http://ollama:11434/api/generate",
        "method": "POST",
        "authentication": "genericCredentialType",
        "genericAuthType": "httpHeaderAuth",
        "options": {
          "allowUnauthorizedCerts": true
        }
      }
    }
  ],
  "connections": {
    "Ollama": {
      "main": [
        [
          {
            "node": "Process Result",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  }
}
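
It can help to confirm the Ollama endpoint is reachable before wiring it into a workflow. A quick smoke test from the host, assuming the default port mapping:

# Call the same generate API the node uses
curl http://localhost:11434/api/generate \
  -d '{"model": "llama2", "prompt": "Hello", "stream": false}'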

Security Integration

// Security Workflow
{
  "nodes": [
    {
      "name": "AI Guardian",
      "type": "n8n-nodes-base.httpRequest",
      "parameters": {
        "url": "http://ai-guardian:8000/api/validate",
        "method": "POST",
        "authentication": "headerAuth",
        "headerParameters": {
          "parameters": [
            {
              "name": "Authorization",
              "value": "={{$credentials.aiGuardianApi}}"
            }
          ]
        }
      }
    }
  ]
}

Monitoring

Health Checks

# docker-compose.yml
services:
  n8n:
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5678/health"]
      interval: 30s
      timeout: 10s
      retries: 3
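
Docker records the outcome of this check. The latest health status can be read back with docker inspect (the container lookup assumes the service is named n8n in the compose project):

# Show the most recent health check results for the n8n container
docker inspect --format '{{json .State.Health}}' "$(docker-compose ps -q n8n)"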

Workflow Monitoring

// Error workflow: assign it as the error workflow of the flows to monitor
{
  "nodes": [
    {
      "name": "Error Handler",
      "type": "n8n-nodes-base.errorTrigger",
      "parameters": {}
    },
    },
    {
      "name": "Send Alert",
      "type": "n8n-nodes-base.slack",
      "parameters": {
        "channel": "monitoring",
        "text": "={{$json.error}}"
      }
    }
  ]
}

Performance Optimization

Queue Management

# .env file (queue mode requires Redis and at least one worker)
EXECUTIONS_MODE=queue
QUEUE_BULL_REDIS_HOST=redis
QUEUE_BULL_REDIS_PORT=6379
QUEUE_BULL_REDIS_DB=0
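
In queue mode the main instance only enqueues executions; they are processed by one or more workers. Per-worker parallelism is set when the worker starts, as in this sketch (the compose service layout is an assumption):

# Start an additional worker that processes up to 5 executions in parallel
docker-compose run -d n8n n8n worker --concurrency=5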

Caching

// Cache configuration
{
  "nodes": [
    {
      "name": "Cache Check",
      "type": "n8n-nodes-base.function",
      "parameters": {
        "functionCode": "
          const cacheKey = $input.first().json.key;
          const cached = await $node.context.cache.get(cacheKey);
          if (cached) {
            return cached;
          }
          // Continue processing if not cached
          return $input.all();
        "
      }
    }
  ]
}

Troubleshooting

Common Issues

  1. Connection Issues:

    # Check n8n status
    curl -v http://localhost:5678/healthz
    
    # Check logs
    docker-compose logs n8n
    

  2. Workflow Issues:

    # Check workflow execution
    curl -H "Authorization: Basic ${N8N_AUTH}" \
      http://localhost:5678/api/v1/executions
    
    # Check active workflows
    curl -H "Authorization: Basic ${N8N_AUTH}" \
      http://localhost:5678/api/v1/workflows
    

Additional Resources

  1. Workflow Guide
  2. Node Development
  3. Integration Guide
  4. Security Guide

Best Practices

  1. Workflow Design:
     - Keep workflows modular
     - Use error handling
     - Implement logging
     - Test thoroughly

  2. Security:
     - Secure credentials
     - Validate inputs
     - Monitor access
     - Regular updates

  3. Performance:
     - Optimize workflows
     - Use caching
     - Monitor resources
     - Regular maintenance