Can't keep up with Llama.cpp changes, made an n8n workflow to summarize it for me daily

Posted by andy2na@reddit | LocalLLaMA | View on Reddit | 19 comments

My kind of daily news sent to me via Discord

The N8N workflow (you could probably have Hermes or another agent do similar):

{
  "nodes": [
    {
      "parameters": {
        "rule": {
          "interval": [
            {
              "triggerAtHour": 10
            }
          ]
        }
      },
      "id": "8fbb0e44-2d2b-45e5-8f46-1f95a04d88c4",
      "name": "Schedule Trigger",
      "type": "n8n-nodes-base.scheduleTrigger",
      "typeVersion": 1.1,
      "position": [
        0,
        0
      ]
    },
    {
      "parameters": {
        "url": "https://api.github.com/repos/ggml-org/llama.cpp/releases",
        "sendHeaders": true,
        "headerParameters": {
          "parameters": [
            {
              "name": "User-Agent",
              "value": "n8n-workflow-automation"
            }
          ]
        },
        "options": {}
      },
      "id": "729fbd1f-98c5-4ed7-8285-f396c8413b5c",
      "name": "Fetch GitHub Releases",
      "type": "n8n-nodes-base.httpRequest",
      "typeVersion": 4.1,
      "position": [
        224,
        0
      ]
    },
    {
      "parameters": {
        "jsCode": "// Get all items from the previous node\nconst items = $input.all();\nconst twentyFourHoursAgo = new Date(Date.now() - 24 * 60 * 60 * 1000);\n\n// Safely extract the JSON data depending on how n8n parsed the HTTP response\nconst releases = items.length === 1 && Array.isArray(items[0].json) \n  ? items[0].json \n  : items.map(item => item.json);\n\n// Filter releases from the last 24 hours\nconst recentReleases = releases.filter(release => {\n  // Use published_at if available, otherwise fallback to created_at\n  const releaseDate = new Date(release.published_at || release.created_at);\n  return releaseDate > twentyFourHoursAgo;\n});\n\n// If no new releases, return an empty array to stop the workflow\nif (recentReleases.length === 0) {\n  return []; \n}\n\n// Combine all release notes into a single Markdown string\nlet combinedNotes = recentReleases.map(r => {\n  const title = r.name || r.tag_name || 'Update';\n  const body = r.body || 'No release notes provided.';\n  return `## ${title}\\n${body}`;\n}).join('\\n\\n---\\n\\n');\n\n// Output the final combined notes and the count for the AI node to process\nreturn [{ \n  json: { \n    combinedNotes: combinedNotes, \n    count: recentReleases.length \n  } \n}];"
      },
      "id": "3eb58cee-fa2d-47a8-a6aa-71eb9a22f1a0",
      "name": "Filter 24h & Extract",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        448,
        0
      ]
    },
    {
      "parameters": {
        "model": "Qwen3.5-35B:instruct",
        "options": {}
      },
      "id": "206b7d98-374a-493f-b79d-0ceb2f472dd0",
      "name": "OpenAI Model",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        624,
        208
      ],
      "typeVersion": 1,
      "credentials": {
        "openAiApi": {
          "id": "fuQa8ZhsNxUYdif2",
          "name": "llama-cpp"
        }
      }
    },
    {
      "parameters": {
        "promptType": "define",
        "text": "=Summarize the following release notes for llama.cpp. \n\nFirst, provide a detailed technical summary of the changes in a paragraph, max 5 sentences.\nSecond, provide a summary in layman's terms in a paragraph, max 6 sentences.\n\nCrucially, make sure to highlight any specific CUDA optimizations, cache changes, or GPU improvements that would impact a rig running a mixed setup of an RTX 5060 Ti and 3090. Also specifically mention any optimizations regarding Qwen or Gemma. \n\nIf none exist in this update, simply don't mention the hardware.\n\nRelease Notes:\n{{ $json.combinedNotes }}",
        "options": {
          "systemMessage": "You are a helpful technical assistant that summarizes llama.cpp release notes. Base your summary strictly on the release notes provided in the prompt; do not invent changes that are not mentioned there."
        }
      },
      "id": "fb335d72-077e-4f30-9e00-5cdead0ca298",
      "name": "AI Agent",
      "type": "@n8n/n8n-nodes-langchain.agent",
      "position": [
        624,
        0
      ],
      "typeVersion": 1.6
    },
    {
      "parameters": {
        "authentication": "webhook",
        "content": "=🤖 **Llama.cpp 24 Hour Update Summary** 🤖",
        "options": {},
        "embeds": {
          "values": [
            {
              "description": "={{ $json.output }}",
              "color": "#E5A00D"
            }
          ]
        }
      },
      "id": "54872e50-f045-4b4b-aa54-3898c225ca14",
      "name": "Send to Discord",
      "type": "n8n-nodes-base.discord",
      "position": [
        944,
        0
      ],
      "typeVersion": 2,
      "webhookId": "26caa31f-87b9-411c-9fe5-0037ec9762a0",
      "credentials": {
        "discordWebhookApi": {
          "id": "w6doohH6ryvsJfQl",
          "name": "Discord Webhook account"
        }
      }
    }
  ],
  "connections": {
    "Schedule Trigger": {
      "main": [
        [
          {
            "node": "Fetch GitHub Releases",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Fetch GitHub Releases": {
      "main": [
        [
          {
            "node": "Filter 24h & Extract",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Filter 24h & Extract": {
      "main": [
        [
          {
            "node": "AI Agent",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "OpenAI Model": {
      "ai_languageModel": [
        [
          {
            "node": "AI Agent",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "AI Agent": {
      "main": [
        [
          {
            "node": "Send to Discord",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  },
  "pinData": {},
  "meta": {
    "templateCredsSetupCompleted": true,
    "instanceId": "49d2e96c7fb6baed481e1ed60fe55680b7404a961357bb768900d80cabfb3c91"
  }
}