diff --git a/.github/workflows/issue-triage.lock.yml b/.github/workflows/issue-triage.lock.yml
index 3249affe00..3fc2d5d190 100644
--- a/.github/workflows/issue-triage.lock.yml
+++ b/.github/workflows/issue-triage.lock.yml
@@ -5,7 +5,7 @@
#
# Source: githubnext/agentics/workflows/issue-triage.md@0837fb7b24c3b84ee77fb7c8cfa8735c48be347a
#
-# Effective stop-time: 2025-11-27 03:00:29
+# Effective stop-time: 2025-12-03 20:01:19
#
# Job Dependency Graph:
# ```mermaid
@@ -33,18 +33,29 @@
# add_labels --> update_reaction
# missing_tool --> update_reaction
# ```
+#
+# Pinned GitHub Actions:
+# - actions/checkout@v5 (08c6903cd8c0fde910a37f88322edcfb5dd907a8)
+# https://github.com/actions/checkout/commit/08c6903cd8c0fde910a37f88322edcfb5dd907a8
+# - actions/download-artifact@v5 (634f93cb2916e3fdff6788551b99b062d0335ce0)
+# https://github.com/actions/download-artifact/commit/634f93cb2916e3fdff6788551b99b062d0335ce0
+# - actions/github-script@v8 (ed597411d8f924073f98dfc5c65a23a2325f34cd)
+# https://github.com/actions/github-script/commit/ed597411d8f924073f98dfc5c65a23a2325f34cd
+# - actions/setup-node@v6 (2028fbc5c25fe9cf00d9f06a71cc4710d4507903)
+# https://github.com/actions/setup-node/commit/2028fbc5c25fe9cf00d9f06a71cc4710d4507903
+# - actions/upload-artifact@v4 (ea165f8d65b6e75b540449e92b4886f43607fa02)
+# https://github.com/actions/upload-artifact/commit/ea165f8d65b6e75b540449e92b4886f43607fa02
name: "Agentic Triage"
"on":
- issues:
- types:
- - opened
- - reopened
+ schedule:
+ - cron: 0 0 * * *
+ workflow_dispatch: null
permissions: read-all
concurrency:
- group: "gh-aw-${{ github.workflow }}-${{ github.event.issue.number }}"
+ group: "gh-aw-${{ github.workflow }}"
run-name: "Agentic Triage"
@@ -52,7 +63,7 @@ jobs:
activation:
needs: pre_activation
if: needs.pre_activation.outputs.activated == 'true'
- runs-on: ubuntu-latest
+ runs-on: ubuntu-slim
permissions:
discussions: write
issues: write
@@ -63,24 +74,82 @@ jobs:
comment_url: ${{ steps.react.outputs.comment-url }}
reaction_id: ${{ steps.react.outputs.reaction-id }}
steps:
+ - name: Checkout workflows
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+ with:
+ sparse-checkout: |
+ .github/workflows
+ sparse-checkout-cone-mode: false
+ fetch-depth: 1
+ persist-credentials: false
- name: Check workflow file timestamps
- run: |
- WORKFLOW_FILE="${GITHUB_WORKSPACE}/.github/workflows/$(basename "$GITHUB_WORKFLOW" .lock.yml).md"
- LOCK_FILE="${GITHUB_WORKSPACE}/.github/workflows/$GITHUB_WORKFLOW"
-
- if [ -f "$WORKFLOW_FILE" ] && [ -f "$LOCK_FILE" ]; then
- if [ "$WORKFLOW_FILE" -nt "$LOCK_FILE" ]; then
- echo "🔴🔴🔴 WARNING: Lock file '$LOCK_FILE' is outdated! The workflow file '$WORKFLOW_FILE' has been modified more recently. Run 'gh aw compile' to regenerate the lock file." >&2
- echo "## ⚠️ Workflow Lock File Warning" >> $GITHUB_STEP_SUMMARY
- echo "🔴🔴🔴 **WARNING**: Lock file \`$LOCK_FILE\` is outdated!" >> $GITHUB_STEP_SUMMARY
- echo "The workflow file \`$WORKFLOW_FILE\` has been modified more recently." >> $GITHUB_STEP_SUMMARY
- echo "Run \`gh aw compile\` to regenerate the lock file." >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- fi
- fi
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
+ env:
+ GH_AW_WORKFLOW_FILE: "issue-triage.lock.yml"
+ with:
+ script: |
+ const fs = require("fs");
+ const path = require("path");
+ async function main() {
+ const workspace = process.env.GITHUB_WORKSPACE;
+ const workflowFile = process.env.GH_AW_WORKFLOW_FILE;
+ if (!workspace) {
+ core.setFailed("Configuration error: GITHUB_WORKSPACE not available.");
+ return;
+ }
+ if (!workflowFile) {
+ core.setFailed("Configuration error: GH_AW_WORKFLOW_FILE not available.");
+ return;
+ }
+ const workflowBasename = path.basename(workflowFile, ".lock.yml");
+ const workflowMdFile = path.join(workspace, ".github", "workflows", `${workflowBasename}.md`);
+ const lockFile = path.join(workspace, ".github", "workflows", workflowFile);
+ core.info(`Checking workflow timestamps:`);
+ core.info(` Source: ${workflowMdFile}`);
+ core.info(` Lock file: ${lockFile}`);
+ let workflowExists = false;
+ let lockExists = false;
+ try {
+ fs.accessSync(workflowMdFile, fs.constants.F_OK);
+ workflowExists = true;
+ } catch (error) {
+ core.info(`Source file does not exist: ${workflowMdFile}`);
+ }
+ try {
+ fs.accessSync(lockFile, fs.constants.F_OK);
+ lockExists = true;
+ } catch (error) {
+ core.info(`Lock file does not exist: ${lockFile}`);
+ }
+ if (!workflowExists || !lockExists) {
+ core.info("Skipping timestamp check - one or both files not found");
+ return;
+ }
+ const workflowStat = fs.statSync(workflowMdFile);
+ const lockStat = fs.statSync(lockFile);
+ const workflowMtime = workflowStat.mtime.getTime();
+ const lockMtime = lockStat.mtime.getTime();
+ core.info(` Source modified: ${workflowStat.mtime.toISOString()}`);
+ core.info(` Lock modified: ${lockStat.mtime.toISOString()}`);
+ if (workflowMtime > lockMtime) {
+ const warningMessage = `🔴🔴🔴 WARNING: Lock file '${lockFile}' is outdated! The workflow file '${workflowMdFile}' has been modified more recently. Run 'gh aw compile' to regenerate the lock file.`;
+ core.error(warningMessage);
+ await core.summary
+ .addRaw("## ⚠️ Workflow Lock File Warning\n\n")
+ .addRaw(`🔴🔴🔴 **WARNING**: Lock file \`${lockFile}\` is outdated!\n\n`)
+ .addRaw(`The workflow file \`${workflowMdFile}\` has been modified more recently.\n\n`)
+ .addRaw("Run `gh aw compile` to regenerate the lock file.\n\n")
+ .write();
+ } else {
+ core.info("✅ Lock file is up to date");
+ }
+ }
+ main().catch(error => {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+ });
- name: Add eyes reaction to the triggering item
id: react
- if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_review_comment' || github.event_name == 'discussion' || github.event_name == 'discussion_comment' || (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.full_name == github.repository)
+ if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_review_comment' || github.event_name == 'discussion' || github.event_name == 'discussion_comment' || (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.id == github.repository_id)
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
env:
GH_AW_REACTION: eyes
@@ -414,9 +483,9 @@ jobs:
- agent
- detection
if: >
- ((!cancelled()) && (contains(needs.agent.outputs.output_types, 'add_comment'))) && (((github.event.issue.number) ||
- (github.event.pull_request.number)) || (github.event.discussion.number))
- runs-on: ubuntu-latest
+ (((!cancelled()) && (needs.agent.result != 'skipped')) && (contains(needs.agent.outputs.output_types, 'add_comment'))) &&
+ (((github.event.issue.number) || (github.event.pull_request.number)) || (github.event.discussion.number))
+ runs-on: ubuntu-slim
permissions:
contents: read
discussions: write
@@ -805,9 +874,9 @@ jobs:
- agent
- detection
if: >
- ((!cancelled()) && (contains(needs.agent.outputs.output_types, 'add_labels'))) && ((github.event.issue.number) ||
- (github.event.pull_request.number))
- runs-on: ubuntu-latest
+ (((!cancelled()) && (needs.agent.result != 'skipped')) && (contains(needs.agent.outputs.output_types, 'add_labels'))) &&
+ ((github.event.issue.number) || (github.event.pull_request.number))
+ runs-on: ubuntu-slim
permissions:
contents: read
issues: write
@@ -1046,6 +1115,8 @@ jobs:
needs: activation
runs-on: ubuntu-latest
permissions: read-all
+ concurrency:
+ group: "gh-aw-copilot-${{ github.workflow }}"
env:
GH_AW_SAFE_OUTPUTS: /tmp/gh-aw/safeoutputs/outputs.jsonl
GH_AW_SAFE_OUTPUTS_CONFIG: "{\"add_comment\":{\"max\":1},\"add_labels\":{\"max\":5},\"missing_tool\":{}}"
@@ -1055,14 +1126,22 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+ with:
+ persist-credentials: false
- name: Create gh-aw temp directory
run: |
mkdir -p /tmp/gh-aw/agent
echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files"
- name: Configure Git credentials
+ env:
+ REPO_NAME: ${{ github.repository }}
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git config --global user.name "${{ github.workflow }}"
+ git config --global user.name "github-actions[bot]"
+ # Re-authenticate git with GitHub token
+ SERVER_URL="${{ github.server_url }}"
+ SERVER_URL="${SERVER_URL#https://}"
+ git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL}/${REPO_NAME}.git"
echo "Git configured with standard GitHub Actions identity"
- name: Checkout PR branch
if: |
@@ -1114,15 +1193,15 @@ jobs:
env:
COPILOT_CLI_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
- name: Setup Node.js
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
+ uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903
with:
node-version: '24'
- name: Install GitHub Copilot CLI
- run: npm install -g @github/copilot@0.0.351
+ run: npm install -g @github/copilot@0.0.353
- name: Downloading container images
run: |
set -e
- docker pull ghcr.io/github/github-mcp-server:v0.19.1
+ docker pull ghcr.io/github/github-mcp-server:v0.20.1
docker pull mcp/fetch
- name: Setup Safe Outputs Collector MCP
run: |
@@ -1913,6 +1992,13 @@ jobs:
chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs
- name: Setup MCPs
+ env:
+ GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_SAFE_OUTPUTS_CONFIG: ${{ toJSON(env.GH_AW_SAFE_OUTPUTS_CONFIG) }}
+ GH_AW_ASSETS_BRANCH: ${{ env.GH_AW_ASSETS_BRANCH }}
+ GH_AW_ASSETS_MAX_SIZE_KB: ${{ env.GH_AW_ASSETS_MAX_SIZE_KB }}
+ GH_AW_ASSETS_ALLOWED_EXTS: ${{ env.GH_AW_ASSETS_ALLOWED_EXTS }}
run: |
mkdir -p /tmp/gh-aw/mcp-config
mkdir -p /home/runner/.copilot
@@ -1932,7 +2018,7 @@ jobs:
"GITHUB_READ_ONLY=1",
"-e",
"GITHUB_TOOLSETS=default",
- "ghcr.io/github/github-mcp-server:v0.19.1"
+ "ghcr.io/github/github-mcp-server:v0.20.1"
],
"tools": ["*"],
"env": {
@@ -1949,7 +2035,9 @@ jobs:
"GH_AW_SAFE_OUTPUTS_CONFIG": "\${GH_AW_SAFE_OUTPUTS_CONFIG}",
"GH_AW_ASSETS_BRANCH": "\${GH_AW_ASSETS_BRANCH}",
"GH_AW_ASSETS_MAX_SIZE_KB": "\${GH_AW_ASSETS_MAX_SIZE_KB}",
- "GH_AW_ASSETS_ALLOWED_EXTS": "\${GH_AW_ASSETS_ALLOWED_EXTS}"
+ "GH_AW_ASSETS_ALLOWED_EXTS": "\${GH_AW_ASSETS_ALLOWED_EXTS}",
+ "GITHUB_REPOSITORY": "\${GITHUB_REPOSITORY}",
+ "GITHUB_SERVER_URL": "\${GITHUB_SERVER_URL}"
}
},
"web-fetch": {
@@ -1978,25 +2066,28 @@ jobs:
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
run: |
mkdir -p $(dirname "$GH_AW_PROMPT")
- cat > $GH_AW_PROMPT << 'PROMPT_EOF'
+ cat > "$GH_AW_PROMPT" << 'PROMPT_EOF'
# Agentic Triage
- You're a triage assistant for GitHub issues. Your task is to analyze issue #${{ github.event.issue.number }} and perform some initial triage tasks related to that issue.
+ You're a triage assistant for GitHub issues. Your task is to analyze issues created in the last 24 hours and perform initial triage tasks for each of them.
- 1. Select appropriate labels for the issue from the provided list.
+ 1. First, use the `list_issues` tool to retrieve all issues created in the last 24 hours. Filter issues by using the `since` parameter with a timestamp from 24 hours ago (calculate: current time minus 24 hours in ISO 8601 format).
- 2. Retrieve the issue content using the `get_issue` tool. If the issue is obviously spam, or generated by bot, or something else that is not an actual issue to be worked on, then add an issue comment to the issue with a one sentence analysis and exit the workflow.
+ 2. For each issue found, perform the following triage tasks:
- 3. Next, use the GitHub tools to gather additional context about the issue:
+ 3. Select appropriate labels for the issue from the provided list.
+
+ 4. Retrieve the issue content using the `get_issue` tool. If the issue is obviously spam, or generated by bot, or something else that is not an actual issue to be worked on, then add an issue comment to the issue with a one sentence analysis and move to the next issue.
+
+ 5. Next, use the GitHub tools to gather additional context about the issue:
- Fetch the list of labels available in this repository. Use 'gh label list' bash command to fetch the labels. This will give you the labels you can use for triaging issues.
- Fetch any comments on the issue using the `get_issue_comments` tool
- - Find similar issues if needed using the `search_issues` tool
- - List the issues to see other open issues in the repository using the `list_issues` tool
+ - **Search for duplicate and related issues**: Use the `search_issues` tool to find similar issues by searching for key terms from the issue title and description. Look for both open and closed issues that might be related or duplicates.
- 4. Analyze the issue content, considering:
+ 6. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
@@ -2005,9 +2096,9 @@ jobs:
- User impact
- Components affected
- 5. Write notes, ideas, nudges, resource links, debugging strategies and/or reproduction steps for the team to consider relevant to the issue.
+ 7. Write notes, ideas, nudges, resource links, debugging strategies and/or reproduction steps for the team to consider relevant to the issue.
- 6. Select appropriate labels from the available labels list provided above:
+ 8. Select appropriate labels from the available labels list provided above:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
@@ -2017,15 +2108,16 @@ jobs:
- Only select labels from the provided list above
- It's okay to not add any labels if none are clearly applicable
- 7. Apply the selected labels:
+ 9. Apply the selected labels:
- Use the `update_issue` tool to apply the labels to the issue
- DO NOT communicate directly with users
- If no labels are clearly applicable, do not apply any labels
- 8. Add an issue comment to the issue with your analysis:
+ 10. Add an issue comment to the issue with your analysis:
- Start with "🎯 Agentic Issue Triage"
- Provide a brief summary of the issue
+ - **If duplicate or related issues were found**, add a section listing them with links (e.g., "### 🔗 Potentially Related Issues" followed by a bullet list of related issues with their titles and links)
- Mention any relevant details that might help the team understand the issue better
- Include any debugging strategies or reproduction steps if applicable
- Suggest resources or links that might be helpful for resolving the issue or learning skills related to the issue or the particular area of the codebase affected by it
@@ -2035,12 +2127,14 @@ jobs:
- If appropriate break the issue down to sub-tasks and write a checklist of things to do.
- Use collapsed-by-default sections in the GitHub markdown to keep the comment tidy. Collapse all sections except the short main summary at the top.
+ 11. After processing all issues, provide a summary of how many issues were triaged. If no issues were created in the last 24 hours, simply note that no new issues needed triage.
+
PROMPT_EOF
- name: Append XPIA security instructions to prompt
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
- cat >> $GH_AW_PROMPT << 'PROMPT_EOF'
+ cat >> "$GH_AW_PROMPT" << 'PROMPT_EOF'
---
@@ -2072,7 +2166,7 @@ jobs:
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
- cat >> $GH_AW_PROMPT << 'PROMPT_EOF'
+ cat >> "$GH_AW_PROMPT" << 'PROMPT_EOF'
---
@@ -2085,7 +2179,7 @@ jobs:
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
- cat >> $GH_AW_PROMPT << 'PROMPT_EOF'
+ cat >> "$GH_AW_PROMPT" << 'PROMPT_EOF'
---
@@ -2110,7 +2204,7 @@ jobs:
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
- cat >> $GH_AW_PROMPT << 'PROMPT_EOF'
+ cat >> "$GH_AW_PROMPT" << 'PROMPT_EOF'
---
@@ -2179,14 +2273,14 @@ jobs:
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
- echo "" >> $GITHUB_STEP_SUMMARY
-      echo "<details><summary>Generated Prompt</summary>" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo '```markdown' >> $GITHUB_STEP_SUMMARY
- cat $GH_AW_PROMPT >> $GITHUB_STEP_SUMMARY
- echo '```' >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
-      echo "</details>" >> $GITHUB_STEP_SUMMARY
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+      echo "<details><summary>Generated Prompt</summary>" >> "$GITHUB_STEP_SUMMARY"
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+ echo '```markdown' >> "$GITHUB_STEP_SUMMARY"
+ cat "$GH_AW_PROMPT" >> "$GITHUB_STEP_SUMMARY"
+ echo '```' >> "$GITHUB_STEP_SUMMARY"
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+      echo "</details>" >> "$GITHUB_STEP_SUMMARY"
- name: Upload prompt
if: always()
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
@@ -2194,13 +2288,6 @@ jobs:
name: prompt.txt
path: /tmp/gh-aw/aw-prompts/prompt.txt
if-no-files-found: warn
- - name: Capture agent version
- run: |
- VERSION_OUTPUT=$(copilot --version 2>&1 || echo "unknown")
- # Extract semantic version pattern (e.g., 1.2.3, v1.2.3-beta)
- CLEAN_VERSION=$(echo "$VERSION_OUTPUT" | grep -oE 'v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9]+)?' | head -n1 || echo "unknown")
- echo "AGENT_VERSION=$CLEAN_VERSION" >> $GITHUB_ENV
- echo "Agent version: $VERSION_OUTPUT"
- name: Generate agentic run info
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
with:
@@ -2212,7 +2299,7 @@ jobs:
engine_name: "GitHub Copilot CLI",
model: "",
version: "",
- agent_version: process.env.AGENT_VERSION || "",
+ agent_version: "0.0.353",
workflow_name: "Agentic Triage",
experimental: false,
supports_tools_allowlist: true,
@@ -2226,6 +2313,9 @@ jobs:
actor: context.actor,
event_name: context.eventName,
staged: false,
+ steps: {
+ firewall: ""
+ },
created_at: new Date().toISOString()
};
@@ -2262,9 +2352,12 @@ jobs:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
GH_AW_SAFE_OUTPUTS_CONFIG: "{\"add_comment\":{\"max\":1},\"add_labels\":{\"max\":5},\"missing_tool\":{}}"
+ GITHUB_HEAD_REF: ${{ github.head_ref }}
GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ GITHUB_REF_NAME: ${{ github.ref_name }}
GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }}
GITHUB_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
+ GITHUB_WORKSPACE: ${{ github.workspace }}
XDG_CONFIG_HOME: /home/runner
- name: Redact secrets in logs
if: always()
@@ -2399,71 +2492,135 @@ jobs:
script: |
async function main() {
const fs = require("fs");
- const maxBodyLength = 65000;
- function sanitizeContent(content, maxLength) {
- if (!content || typeof content !== "string") {
- return "";
- }
- const allowedDomainsEnv = process.env.GH_AW_ALLOWED_DOMAINS;
- const defaultAllowedDomains = ["github.com", "github.io", "githubusercontent.com", "githubassets.com", "github.dev", "codespaces.new"];
- const allowedDomains = allowedDomainsEnv
- ? allowedDomainsEnv
- .split(",")
- .map(d => d.trim())
- .filter(d => d)
- : defaultAllowedDomains;
- let sanitized = content;
- sanitized = neutralizeMentions(sanitized);
- sanitized = removeXmlComments(sanitized);
- sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, "");
- sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, "");
- sanitized = sanitizeUrlProtocols(sanitized);
- sanitized = sanitizeUrlDomains(sanitized);
- const lines = sanitized.split("\n");
- const maxLines = 65000;
- maxLength = maxLength || 524288;
- if (lines.length > maxLines) {
- const truncationMsg = "\n[Content truncated due to line count]";
- const truncatedLines = lines.slice(0, maxLines).join("\n") + truncationMsg;
- if (truncatedLines.length > maxLength) {
- sanitized = truncatedLines.substring(0, maxLength - truncationMsg.length) + truncationMsg;
- } else {
- sanitized = truncatedLines;
- }
- } else if (sanitized.length > maxLength) {
- sanitized = sanitized.substring(0, maxLength) + "\n[Content truncated due to length]";
- }
- sanitized = neutralizeBotTriggers(sanitized);
- return sanitized.trim();
- function sanitizeUrlDomains(s) {
- return s.replace(/\bhttps:\/\/[^\s\])}'"<>&\x00-\x1f,;]+/gi, match => {
- const urlAfterProtocol = match.slice(8);
- const hostname = urlAfterProtocol.split(/[\/:\?#]/)[0].toLowerCase();
- const isAllowed = allowedDomains.some(allowedDomain => {
- const normalizedAllowed = allowedDomain.toLowerCase();
- return hostname === normalizedAllowed || hostname.endsWith("." + normalizedAllowed);
- });
- return isAllowed ? match : "(redacted)";
- });
- }
- function sanitizeUrlProtocols(s) {
- return s.replace(/\b(\w+):\/\/[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => {
- return protocol.toLowerCase() === "https" ? match : "(redacted)";
- });
- }
- function neutralizeMentions(s) {
- return s.replace(
- /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g,
- (_m, p1, p2) => `${p1}\`@${p2}\``
- );
- }
- function removeXmlComments(s) {
-              return s.replace(/<!--[\s\S]*?-->/g, "").replace(/<!--[\s\S]*?--!>/g, "");
- }
- function neutralizeBotTriggers(s) {
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, (match, action, ref) => `\`${action} #${ref}\``);
- }
+ function sanitizeContent(content, maxLength) {
+ if (!content || typeof content !== "string") {
+ return "";
}
+ const allowedDomainsEnv = process.env.GH_AW_ALLOWED_DOMAINS;
+ const defaultAllowedDomains = ["github.com", "github.io", "githubusercontent.com", "githubassets.com", "github.dev", "codespaces.new"];
+ const allowedDomains = allowedDomainsEnv
+ ? allowedDomainsEnv
+ .split(",")
+ .map(d => d.trim())
+ .filter(d => d)
+ : defaultAllowedDomains;
+ let sanitized = content;
+ sanitized = neutralizeCommands(sanitized);
+ sanitized = neutralizeMentions(sanitized);
+ sanitized = removeXmlComments(sanitized);
+ sanitized = convertXmlTags(sanitized);
+ sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, "");
+ sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, "");
+ sanitized = sanitizeUrlProtocols(sanitized);
+ sanitized = sanitizeUrlDomains(sanitized);
+ const lines = sanitized.split("\n");
+ const maxLines = 65000;
+ maxLength = maxLength || 524288;
+ if (lines.length > maxLines) {
+ const truncationMsg = "\n[Content truncated due to line count]";
+ const truncatedLines = lines.slice(0, maxLines).join("\n") + truncationMsg;
+ if (truncatedLines.length > maxLength) {
+ sanitized = truncatedLines.substring(0, maxLength - truncationMsg.length) + truncationMsg;
+ } else {
+ sanitized = truncatedLines;
+ }
+ } else if (sanitized.length > maxLength) {
+ sanitized = sanitized.substring(0, maxLength) + "\n[Content truncated due to length]";
+ }
+ sanitized = neutralizeBotTriggers(sanitized);
+ return sanitized.trim();
+ function sanitizeUrlDomains(s) {
+ s = s.replace(/\bhttps:\/\/([^\s\])}'"<>&\x00-\x1f,;]+)/gi, (match, rest) => {
+ const hostname = rest.split(/[\/:\?#]/)[0].toLowerCase();
+ const isAllowed = allowedDomains.some(allowedDomain => {
+ const normalizedAllowed = allowedDomain.toLowerCase();
+ return hostname === normalizedAllowed || hostname.endsWith("." + normalizedAllowed);
+ });
+ if (isAllowed) {
+ return match;
+ }
+ const domain = hostname;
+ const truncated = domain.length > 12 ? domain.substring(0, 12) + "..." : domain;
+ core.info(`Redacted URL: ${truncated}`);
+ core.debug(`Redacted URL (full): ${match}`);
+ const urlParts = match.split(/([?])/);
+ let result = "(redacted)";
+ for (let i = 1; i < urlParts.length; i++) {
+ if (urlParts[i].match(/^[?]$/)) {
+ result += urlParts[i];
+ } else {
+ result += sanitizeUrlDomains(urlParts[i]);
+ }
+ }
+ return result;
+ });
+ return s;
+ }
+ function sanitizeUrlProtocols(s) {
+              return s.replace(/(?<![-\w])(\w+):[^\s\])}'"<>&\x00-\x1f]+/g, (match, protocol) => {
+ if (protocol.toLowerCase() === "https") {
+ return match;
+ }
+ if (match.includes("::")) {
+ return match;
+ }
+ if (match.includes("://")) {
+ const domainMatch = match.match(/^[^:]+:\/\/([^\/\s?#]+)/);
+ const domain = domainMatch ? domainMatch[1] : match;
+ const truncated = domain.length > 12 ? domain.substring(0, 12) + "..." : domain;
+ core.info(`Redacted URL: ${truncated}`);
+ core.debug(`Redacted URL (full): ${match}`);
+ return "(redacted)";
+ }
+ const dangerousProtocols = ["javascript", "data", "vbscript", "file", "about", "mailto", "tel", "ssh", "ftp"];
+ if (dangerousProtocols.includes(protocol.toLowerCase())) {
+ const truncated = match.length > 12 ? match.substring(0, 12) + "..." : match;
+ core.info(`Redacted URL: ${truncated}`);
+ core.debug(`Redacted URL (full): ${match}`);
+ return "(redacted)";
+ }
+ return match;
+ });
+ }
+ function neutralizeCommands(s) {
+ const commandName = process.env.GH_AW_COMMAND;
+ if (!commandName) {
+ return s;
+ }
+ const escapedCommand = commandName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+ return s.replace(new RegExp(`^(\\s*)/(${escapedCommand})\\b`, "i"), "$1`/$2`");
+ }
+ function neutralizeMentions(s) {
+ return s.replace(
+ /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g,
+ (_m, p1, p2) => `${p1}\`@${p2}\``
+ );
+ }
+ function removeXmlComments(s) {
+              return s.replace(/<!--[\s\S]*?-->/g, "").replace(/<!--[\s\S]*?--!>/g, "");
+ }
+ function convertXmlTags(s) {
+ const allowedTags = ["details", "summary", "code", "em", "b"];
+              s = s.replace(/<!\[CDATA\[([\s\S]*?)\]\]>/g, (match, content) => {
+ const convertedContent = content.replace(/<(\/?[A-Za-z][A-Za-z0-9]*(?:[^>]*?))>/g, "($1)");
+ return `(![CDATA[${convertedContent}]])`;
+ });
+ return s.replace(/<(\/?[A-Za-z!][^>]*?)>/g, (match, tagContent) => {
+ const tagNameMatch = tagContent.match(/^\/?\s*([A-Za-z][A-Za-z0-9]*)/);
+ if (tagNameMatch) {
+ const tagName = tagNameMatch[1].toLowerCase();
+ if (allowedTags.includes(tagName)) {
+ return match;
+ }
+ }
+ return `(${tagContent})`;
+ });
+ }
+ function neutralizeBotTriggers(s) {
+ return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, (match, action, ref) => `\`${action} #${ref}\``);
+ }
+ }
+ const maxBodyLength = 65000;
function getMaxAllowedForType(itemType, config) {
const itemConfig = config?.[itemType];
if (itemConfig && typeof itemConfig === "object" && "max" in itemConfig && itemConfig.max) {
@@ -4295,7 +4452,9 @@ jobs:
detection:
needs: agent
runs-on: ubuntu-latest
- permissions: read-all
+ permissions: {}
+ concurrency:
+ group: "gh-aw-copilot-${{ github.workflow }}"
timeout-minutes: 10
steps:
- name: Download prompt artifact
@@ -4444,11 +4603,11 @@ jobs:
env:
COPILOT_CLI_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
- name: Setup Node.js
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
+ uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903
with:
node-version: '24'
- name: Install GitHub Copilot CLI
- run: npm install -g @github/copilot@0.0.351
+ run: npm install -g @github/copilot@0.0.353
- name: Execute GitHub Copilot CLI
id: agentic_execution
# Copilot CLI tool arguments (sorted):
@@ -4471,8 +4630,11 @@ jobs:
env:
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GITHUB_HEAD_REF: ${{ github.head_ref }}
+ GITHUB_REF_NAME: ${{ github.ref_name }}
GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }}
GITHUB_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
+ GITHUB_WORKSPACE: ${{ github.workspace }}
XDG_CONFIG_HOME: /home/runner
- name: Parse threat detection results
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
@@ -4522,8 +4684,8 @@ jobs:
needs:
- agent
- detection
- if: (!cancelled()) && (contains(needs.agent.outputs.output_types, 'missing_tool'))
- runs-on: ubuntu-latest
+ if: ((!cancelled()) && (needs.agent.result != 'skipped')) && (contains(needs.agent.outputs.output_types, 'missing_tool'))
+ runs-on: ubuntu-slim
permissions:
contents: read
timeout-minutes: 5
@@ -4651,89 +4813,15 @@ jobs:
});
pre_activation:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-slim
outputs:
- activated: ${{ (steps.check_membership.outputs.is_team_member == 'true') && (steps.check_stop_time.outputs.stop_time_ok == 'true') }}
+ activated: ${{ steps.check_stop_time.outputs.stop_time_ok == 'true' }}
steps:
- - name: Check team membership for workflow
- id: check_membership
- uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
- env:
- GH_AW_REQUIRED_ROLES: admin,maintainer,write
- with:
- script: |
- async function main() {
- const { eventName } = context;
- const actor = context.actor;
- const { owner, repo } = context.repo;
- const requiredPermissionsEnv = process.env.GH_AW_REQUIRED_ROLES;
- const requiredPermissions = requiredPermissionsEnv ? requiredPermissionsEnv.split(",").filter(p => p.trim() !== "") : [];
- if (eventName === "workflow_dispatch") {
- const hasWriteRole = requiredPermissions.includes("write");
- if (hasWriteRole) {
- core.info(`✅ Event ${eventName} does not require validation (write role allowed)`);
- core.setOutput("is_team_member", "true");
- core.setOutput("result", "safe_event");
- return;
- }
- core.info(`Event ${eventName} requires validation (write role not allowed)`);
- }
- const safeEvents = ["workflow_run", "schedule"];
- if (safeEvents.includes(eventName)) {
- core.info(`✅ Event ${eventName} does not require validation`);
- core.setOutput("is_team_member", "true");
- core.setOutput("result", "safe_event");
- return;
- }
- if (!requiredPermissions || requiredPermissions.length === 0) {
- core.warning("❌ Configuration error: Required permissions not specified. Contact repository administrator.");
- core.setOutput("is_team_member", "false");
- core.setOutput("result", "config_error");
- core.setOutput("error_message", "Configuration error: Required permissions not specified");
- return;
- }
- try {
- core.info(`Checking if user '${actor}' has required permissions for ${owner}/${repo}`);
- core.info(`Required permissions: ${requiredPermissions.join(", ")}`);
- const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({
- owner: owner,
- repo: repo,
- username: actor,
- });
- const permission = repoPermission.data.permission;
- core.info(`Repository permission level: ${permission}`);
- for (const requiredPerm of requiredPermissions) {
- if (permission === requiredPerm || (requiredPerm === "maintainer" && permission === "maintain")) {
- core.info(`✅ User has ${permission} access to repository`);
- core.setOutput("is_team_member", "true");
- core.setOutput("result", "authorized");
- core.setOutput("user_permission", permission);
- return;
- }
- }
- core.warning(`User permission '${permission}' does not meet requirements: ${requiredPermissions.join(", ")}`);
- core.setOutput("is_team_member", "false");
- core.setOutput("result", "insufficient_permissions");
- core.setOutput("user_permission", permission);
- core.setOutput(
- "error_message",
- `Access denied: User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}`
- );
- } catch (repoError) {
- const errorMessage = repoError instanceof Error ? repoError.message : String(repoError);
- core.warning(`Repository permission check failed: ${errorMessage}`);
- core.setOutput("is_team_member", "false");
- core.setOutput("result", "api_error");
- core.setOutput("error_message", `Repository permission check failed: ${errorMessage}`);
- return;
- }
- }
- await main();
- name: Check stop-time limit
id: check_stop_time
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
env:
- GH_AW_STOP_TIME: 2025-11-27 03:00:29
+ GH_AW_STOP_TIME: 2025-12-03 20:01:19
GH_AW_WORKFLOW_NAME: "Agentic Triage"
with:
script: |
@@ -4776,7 +4864,7 @@ jobs:
if: >
(((((always()) && (needs.agent.result != 'skipped')) && (needs.activation.outputs.comment_id)) && (!contains(needs.agent.outputs.output_types, 'add_comment'))) &&
(!contains(needs.agent.outputs.output_types, 'create_pull_request'))) && (!contains(needs.agent.outputs.output_types, 'push_to_pull_request_branch'))
- runs-on: ubuntu-latest
+ runs-on: ubuntu-slim
permissions:
contents: read
discussions: write
diff --git a/.github/workflows/issue-triage.md b/.github/workflows/issue-triage.md
index 2b4739988a..087f009106 100644
--- a/.github/workflows/issue-triage.md
+++ b/.github/workflows/issue-triage.md
@@ -1,7 +1,8 @@
---
on:
- issues:
- types: [opened, reopened]
+ schedule:
+ - cron: '0 0 * * *' # Run daily at midnight UTC
+ workflow_dispatch: # Enable manual trigger
stop-after: +30d # workflow will no longer trigger after 30 days. Remove this and recompile to run indefinitely
reaction: eyes
@@ -25,20 +26,23 @@ source: githubnext/agentics/workflows/issue-triage.md@0837fb7b24c3b84ee77fb7c8cf
-You're a triage assistant for GitHub issues. Your task is to analyze issue #${{ github.event.issue.number }} and perform some initial triage tasks related to that issue.
+You're a triage assistant for GitHub issues. Your task is to analyze issues created in the last 24 hours and perform initial triage tasks for each of them.
-1. Select appropriate labels for the issue from the provided list.
+1. First, use the `list_issues` tool to retrieve all issues created in the last 24 hours. Filter issues by passing the `since` parameter a timestamp from 24 hours ago (current time minus 24 hours, formatted as an ISO 8601 timestamp).
-2. Retrieve the issue content using the `get_issue` tool. If the issue is obviously spam, or generated by bot, or something else that is not an actual issue to be worked on, then add an issue comment to the issue with a one sentence analysis and exit the workflow.
+2. For each issue found, perform the following triage tasks:
-3. Next, use the GitHub tools to gather additional context about the issue:
+3. Select appropriate labels for the issue from the provided list.
+
+4. Retrieve the issue content using the `get_issue` tool. If the issue is obviously spam, or generated by a bot, or something else that is not an actual issue to be worked on, then add an issue comment to the issue with a one-sentence analysis and move to the next issue.
+
+5. Next, use the GitHub tools to gather additional context about the issue:
- Fetch the list of labels available in this repository. Use 'gh label list' bash command to fetch the labels. This will give you the labels you can use for triaging issues.
- Fetch any comments on the issue using the `get_issue_comments` tool
- - Find similar issues if needed using the `search_issues` tool
- - List the issues to see other open issues in the repository using the `list_issues` tool
+ - **Search for duplicate and related issues**: Use the `search_issues` tool to find similar issues by searching for key terms from the issue title and description. Look for both open and closed issues that might be related or duplicates.
-4. Analyze the issue content, considering:
+6. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
@@ -47,9 +51,9 @@ You're a triage assistant for GitHub issues. Your task is to analyze issue #${{
- User impact
- Components affected
-5. Write notes, ideas, nudges, resource links, debugging strategies and/or reproduction steps for the team to consider relevant to the issue.
+7. Write notes, ideas, nudges, resource links, debugging strategies and/or reproduction steps for the team to consider relevant to the issue.
-6. Select appropriate labels from the available labels list provided above:
+8. Select appropriate labels from the available labels list provided above:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
@@ -59,15 +63,16 @@ You're a triage assistant for GitHub issues. Your task is to analyze issue #${{
- Only select labels from the provided list above
- It's okay to not add any labels if none are clearly applicable
-7. Apply the selected labels:
+9. Apply the selected labels:
- Use the `update_issue` tool to apply the labels to the issue
- DO NOT communicate directly with users
- If no labels are clearly applicable, do not apply any labels
-8. Add an issue comment to the issue with your analysis:
+10. Add an issue comment to the issue with your analysis:
- Start with "🎯 Agentic Issue Triage"
- Provide a brief summary of the issue
+ - **If duplicate or related issues were found**, add a section listing them with links (e.g., "### 🔗 Potentially Related Issues" followed by a bullet list of related issues with their titles and links)
- Mention any relevant details that might help the team understand the issue better
- Include any debugging strategies or reproduction steps if applicable
- Suggest resources or links that might be helpful for resolving the issue or learning skills related to the issue or the particular area of the codebase affected by it
@@ -76,3 +81,5 @@ You're a triage assistant for GitHub issues. Your task is to analyze issue #${{
- If you have any debugging strategies, include them in the comment
- If appropriate break the issue down to sub-tasks and write a checklist of things to do.
- Use collapsed-by-default sections in the GitHub markdown to keep the comment tidy. Collapse all sections except the short main summary at the top.
+
+11. After processing all issues, provide a summary of how many issues were triaged. If no issues were created in the last 24 hours, simply note that no new issues needed triage.
diff --git a/README.md b/README.md
index 1f24d5c5f2..50c1ed399b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-> We just announced Transactions API for Appwrite Databases - [Learn more](https://appwrite.io/blog/post/announcing-transactions-api)
+> We just announced DB operators for Appwrite Databases - [Learn more](https://appwrite.io/blog/post/announcing-db-operators)
> Appwrite Cloud is now Generally Available - [Learn more](https://appwrite.io/cloud-ga)
diff --git a/app/config/templates/site.php b/app/config/templates/site.php
index e552a6b9ac..f2396b66db 100644
--- a/app/config/templates/site.php
+++ b/app/config/templates/site.php
@@ -24,6 +24,7 @@ class UseCases
public const ECOMMERCE = 'ecommerce';
public const DOCUMENTATION = 'documentation';
public const BLOG = 'blog';
+ public const AI = 'artificial intelligence';
}
const TEMPLATE_FRAMEWORKS = [
@@ -970,7 +971,7 @@ return [
'name' => 'TanStack Start starter',
'useCases' => [UseCases::STARTER],
'tagline' => 'Simple TanStack Start application integrated with Appwrite SDK.',
- 'score' => 6, // 0 to 10 based on looks of screenshot (avoid 1,2,3,8,9,10 if possible)
+ 'score' => 9, // 0 to 10 based on looks of screenshot (avoid 1,2,3,8,9,10 if possible)
'screenshotDark' => $url . '/images/sites/templates/starter-for-tanstack-start-dark.png',
'screenshotLight' => $url . '/images/sites/templates/starter-for-tanstack-start-light.png',
'frameworks' => [
@@ -1443,4 +1444,32 @@ return [
'providerVersion' => '0.3.*',
'variables' => []
],
+ [
+ 'key' => 'text-to-speech',
+ 'name' => 'Text-to-speech with ElevenLabs',
+ 'tagline' => 'Next.js app that transforms text into natural, human-like speech using ElevenLabs',
+ 'score' => 10, // 0 to 10 based on looks of screenshot (avoid 1,2,3,8,9,10 if possible)
+ 'useCases' => [UseCases::AI],
+ 'screenshotDark' => $url . '/images/sites/templates/text-to-speech-dark.png',
+ 'screenshotLight' => $url . '/images/sites/templates/text-to-speech-light.png',
+ 'frameworks' => [
+ getFramework('NEXTJS', [
+ 'providerRootDirectory' => './nextjs/text-to-speech',
+ ]),
+ ],
+ 'vcsProvider' => 'github',
+ 'providerRepositoryId' => 'templates-for-sites',
+ 'providerOwner' => 'appwrite',
+ 'providerVersion' => '0.6.*',
+ 'variables' => [
+ [
+ 'name' => 'ELEVENLABS_API_KEY',
+ 'description' => 'Your ElevenLabs API key',
+ 'value' => '',
+ 'placeholder' => 'sk_.....',
+ 'required' => true,
+ 'type' => 'password'
+ ],
+ ]
+ ],
];
diff --git a/public/images/sites/templates/text-to-speech-dark.png b/public/images/sites/templates/text-to-speech-dark.png
new file mode 100644
index 0000000000..afa68c4227
Binary files /dev/null and b/public/images/sites/templates/text-to-speech-dark.png differ
diff --git a/public/images/sites/templates/text-to-speech-light.png b/public/images/sites/templates/text-to-speech-light.png
new file mode 100644
index 0000000000..e10148fe17
Binary files /dev/null and b/public/images/sites/templates/text-to-speech-light.png differ