Compare commits
13 Commits
feature/ch
...
feat/dbml-
| Author | SHA1 | Date | |
|---|---|---|---|
| 6c6b49b45c | |||
| 59c43188e5 | |||
| f0e242293f | |||
|
|
50870dd369 | ||
| b93f1d14f0 | |||
|
|
7c41a3e846 | ||
|
|
d1d140e464 | ||
|
|
9cfcb5621b | ||
|
|
d0bfdbfbab | ||
| 24532ef380 | |||
| 9407c05535 | |||
| f163b9c370 | |||
| 4fdd1411b2 |
30
Makefile
30
Makefile
@@ -7,13 +7,17 @@ PATCH_INCREMENT ?= 1
|
|||||||
VERSION_TAG ?= $(shell git describe --tags --exact-match 2>/dev/null || echo dev)
|
VERSION_TAG ?= $(shell git describe --tags --exact-match 2>/dev/null || echo dev)
|
||||||
COMMIT_SHA ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
COMMIT_SHA ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
||||||
BUILD_DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
BUILD_DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||||
|
RELSPEC ?= $(shell command -v relspec 2>/dev/null || echo $(HOME)/go/bin/relspec)
|
||||||
|
SCHEMA_FILES := $(sort $(wildcard schema/*.dbml))
|
||||||
|
MERGE_TARGET_TMP := $(CURDIR)/.cache/schema.merge-target.dbml
|
||||||
|
GENERATED_SCHEMA_MIGRATION := migrations/020_generated_schema.sql
|
||||||
LDFLAGS := -s -w \
|
LDFLAGS := -s -w \
|
||||||
-X $(BUILDINFO_PKG).Version=$(VERSION_TAG) \
|
-X $(BUILDINFO_PKG).Version=$(VERSION_TAG) \
|
||||||
-X $(BUILDINFO_PKG).TagName=$(VERSION_TAG) \
|
-X $(BUILDINFO_PKG).TagName=$(VERSION_TAG) \
|
||||||
-X $(BUILDINFO_PKG).Commit=$(COMMIT_SHA) \
|
-X $(BUILDINFO_PKG).Commit=$(COMMIT_SHA) \
|
||||||
-X $(BUILDINFO_PKG).BuildDate=$(BUILD_DATE)
|
-X $(BUILDINFO_PKG).BuildDate=$(BUILD_DATE)
|
||||||
|
|
||||||
.PHONY: all build clean migrate release-version test
|
.PHONY: all build clean migrate release-version test generate-migrations check-schema-drift
|
||||||
|
|
||||||
all: build
|
all: build
|
||||||
|
|
||||||
@@ -50,3 +54,27 @@ migrate:
|
|||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf $(BIN_DIR)
|
rm -rf $(BIN_DIR)
|
||||||
|
|
||||||
|
generate-migrations:
|
||||||
|
@test -n "$(SCHEMA_FILES)" || (echo "No DBML schema files found in schema/" >&2; exit 1)
|
||||||
|
@command -v $(RELSPEC) >/dev/null 2>&1 || (echo "relspec not found; install git.warky.dev/wdevs/relspecgo/cmd/relspec@latest" >&2; exit 1)
|
||||||
|
@mkdir -p $(dir $(MERGE_TARGET_TMP))
|
||||||
|
@: > $(MERGE_TARGET_TMP)
|
||||||
|
@schema_list=$$(printf '%s\n' $(SCHEMA_FILES) | paste -sd, -); \
|
||||||
|
$(RELSPEC) merge --target dbml --target-path $(MERGE_TARGET_TMP) --source dbml --from-list "$$schema_list" --output pgsql --output-path $(GENERATED_SCHEMA_MIGRATION)
|
||||||
|
|
||||||
|
check-schema-drift:
|
||||||
|
@test -f $(GENERATED_SCHEMA_MIGRATION) || (echo "$(GENERATED_SCHEMA_MIGRATION) is missing; run make generate-migrations" >&2; exit 1)
|
||||||
|
@command -v $(RELSPEC) >/dev/null 2>&1 || (echo "relspec not found; install git.warky.dev/wdevs/relspecgo/cmd/relspec@latest" >&2; exit 1)
|
||||||
|
@mkdir -p $(dir $(MERGE_TARGET_TMP))
|
||||||
|
@tmpfile=$$(mktemp); \
|
||||||
|
: > $(MERGE_TARGET_TMP); \
|
||||||
|
schema_list=$$(printf '%s\n' $(SCHEMA_FILES) | paste -sd, -); \
|
||||||
|
$(RELSPEC) merge --target dbml --target-path $(MERGE_TARGET_TMP) --source dbml --from-list "$$schema_list" --output pgsql --output-path $$tmpfile; \
|
||||||
|
if ! cmp -s $$tmpfile $(GENERATED_SCHEMA_MIGRATION); then \
|
||||||
|
echo "Schema drift detected between schema/*.dbml and $(GENERATED_SCHEMA_MIGRATION)" >&2; \
|
||||||
|
diff -u $(GENERATED_SCHEMA_MIGRATION) $$tmpfile || true; \
|
||||||
|
rm -f $$tmpfile; \
|
||||||
|
exit 1; \
|
||||||
|
fi; \
|
||||||
|
rm -f $$tmpfile
|
||||||
|
|||||||
28
README.md
28
README.md
@@ -61,6 +61,32 @@ A Go MCP server for capturing and retrieving thoughts, memory, and project conte
|
|||||||
| `remove_project_guardrail` | Unlink an agent guardrail from a project; pass `project` explicitly if your client does not preserve MCP sessions |
|
| `remove_project_guardrail` | Unlink an agent guardrail from a project; pass `project` explicitly if your client does not preserve MCP sessions |
|
||||||
| `list_project_guardrails` | List all guardrails linked to a project; pass `project` explicitly if your client does not preserve MCP sessions |
|
| `list_project_guardrails` | List all guardrails linked to a project; pass `project` explicitly if your client does not preserve MCP sessions |
|
||||||
| `get_version_info` | Return the server build version information, including version, tag name, commit, and build date |
|
| `get_version_info` | Return the server build version information, including version, tag name, commit, and build date |
|
||||||
|
| `describe_tools` | List all available MCP tools with names, descriptions, categories, and model-authored usage notes; call this at the start of a session to orient yourself |
|
||||||
|
| `annotate_tool` | Persist your own usage notes for a specific tool; notes are returned by `describe_tools` in future sessions |
|
||||||
|
|
||||||
|
## Self-Documenting Tools
|
||||||
|
|
||||||
|
AMCS includes a built-in tool directory that models can read and annotate.
|
||||||
|
|
||||||
|
**`describe_tools`** returns every registered tool with its name, description, category, and any model-written notes. Call it with no arguments to get the full list, or filter by category:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{ "category": "thoughts" }
|
||||||
|
```
|
||||||
|
|
||||||
|
Available categories: `system`, `thoughts`, `projects`, `files`, `admin`, `household`, `maintenance`, `calendar`, `meals`, `crm`, `skills`, `chat`, `meta`.
|
||||||
|
|
||||||
|
**`annotate_tool`** lets a model write persistent usage notes against a tool name. Notes survive across sessions and are returned by `describe_tools`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{ "tool_name": "capture_thought", "notes": "Always pass project explicitly — session state is not reliable in this client." }
|
||||||
|
```
|
||||||
|
|
||||||
|
Pass an empty string to clear notes. The intended workflow is:
|
||||||
|
|
||||||
|
1. At the start of a session, call `describe_tools` to discover tools and read accumulated notes.
|
||||||
|
2. As you learn something non-obvious about a tool — a gotcha, a workflow pattern, a required field ordering — call `annotate_tool` to record it.
|
||||||
|
3. Future sessions receive the annotation automatically via `describe_tools`.
|
||||||
|
|
||||||
## MCP Error Contract
|
## MCP Error Contract
|
||||||
|
|
||||||
@@ -236,7 +262,7 @@ Alternatively, pass `client_id` and `client_secret` as body parameters instead o
|
|||||||
- `ai.litellm.base_url` and `ai.litellm.api_key` — LiteLLM proxy
|
- `ai.litellm.base_url` and `ai.litellm.api_key` — LiteLLM proxy
|
||||||
- `ai.ollama.base_url` and `ai.ollama.api_key` — Ollama local or remote server
|
- `ai.ollama.base_url` and `ai.ollama.api_key` — Ollama local or remote server
|
||||||
|
|
||||||
See `llm/plan.md` for full architecture and implementation plan.
|
See `llm/plan.md` for an audited high-level status summary of the original implementation plan, and `llm/todo.md` for the audited backfill/fallback follow-up status.
|
||||||
|
|
||||||
## Backfill
|
## Backfill
|
||||||
|
|
||||||
|
|||||||
BIN
assets/icon.png
Normal file
BIN
assets/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 285 KiB |
@@ -158,7 +158,9 @@ func Run(ctx context.Context, configPath string) error {
|
|||||||
|
|
||||||
func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *store.DB, provider ai.Provider, keyring *auth.Keyring, oauthRegistry *auth.OAuthRegistry, tokenStore *auth.TokenStore, authCodes *auth.AuthCodeStore, dynClients *auth.DynamicClientStore, activeProjects *session.ActiveProjects) (http.Handler, error) {
|
func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *store.DB, provider ai.Provider, keyring *auth.Keyring, oauthRegistry *auth.OAuthRegistry, tokenStore *auth.TokenStore, authCodes *auth.AuthCodeStore, dynClients *auth.DynamicClientStore, activeProjects *session.ActiveProjects) (http.Handler, error) {
|
||||||
mux := http.NewServeMux()
|
mux := http.NewServeMux()
|
||||||
authMiddleware := auth.Middleware(cfg.Auth, keyring, oauthRegistry, tokenStore, logger)
|
accessTracker := auth.NewAccessTracker()
|
||||||
|
oauthEnabled := oauthRegistry != nil && tokenStore != nil
|
||||||
|
authMiddleware := auth.Middleware(cfg.Auth, keyring, oauthRegistry, tokenStore, accessTracker, logger)
|
||||||
filesTool := tools.NewFilesTool(db, activeProjects)
|
filesTool := tools.NewFilesTool(db, activeProjects)
|
||||||
metadataRetryer := tools.NewMetadataRetryer(context.Background(), db, provider, cfg.Capture, cfg.AI.Metadata.Timeout, activeProjects, logger)
|
metadataRetryer := tools.NewMetadataRetryer(context.Background(), db, provider, cfg.Capture, cfg.AI.Metadata.Timeout, activeProjects, logger)
|
||||||
|
|
||||||
@@ -188,6 +190,7 @@ func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *st
|
|||||||
CRM: tools.NewCRMTool(db),
|
CRM: tools.NewCRMTool(db),
|
||||||
Skills: tools.NewSkillsTool(db, activeProjects),
|
Skills: tools.NewSkillsTool(db, activeProjects),
|
||||||
ChatHistory: tools.NewChatHistoryTool(db, activeProjects),
|
ChatHistory: tools.NewChatHistoryTool(db, activeProjects),
|
||||||
|
Describe: tools.NewDescribeTool(db, mcpserver.BuildToolCatalog()),
|
||||||
}
|
}
|
||||||
|
|
||||||
mcpHandler, err := mcpserver.New(cfg.MCP, logger, toolSet, activeProjects.Clear)
|
mcpHandler, err := mcpserver.New(cfg.MCP, logger, toolSet, activeProjects.Clear)
|
||||||
@@ -197,7 +200,7 @@ func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *st
|
|||||||
mux.Handle(cfg.MCP.Path, authMiddleware(mcpHandler))
|
mux.Handle(cfg.MCP.Path, authMiddleware(mcpHandler))
|
||||||
mux.Handle("/files", authMiddleware(fileHandler(filesTool)))
|
mux.Handle("/files", authMiddleware(fileHandler(filesTool)))
|
||||||
mux.Handle("/files/{id}", authMiddleware(fileHandler(filesTool)))
|
mux.Handle("/files/{id}", authMiddleware(fileHandler(filesTool)))
|
||||||
if oauthRegistry != nil && tokenStore != nil {
|
if oauthEnabled {
|
||||||
mux.HandleFunc("/.well-known/oauth-authorization-server", oauthMetadataHandler())
|
mux.HandleFunc("/.well-known/oauth-authorization-server", oauthMetadataHandler())
|
||||||
mux.HandleFunc("/oauth-authorization-server", oauthMetadataHandler())
|
mux.HandleFunc("/oauth-authorization-server", oauthMetadataHandler())
|
||||||
mux.HandleFunc("/oauth/register", oauthRegisterHandler(dynClients, logger))
|
mux.HandleFunc("/oauth/register", oauthRegisterHandler(dynClients, logger))
|
||||||
@@ -207,6 +210,7 @@ func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *st
|
|||||||
}
|
}
|
||||||
mux.HandleFunc("/favicon.ico", serveFavicon)
|
mux.HandleFunc("/favicon.ico", serveFavicon)
|
||||||
mux.HandleFunc("/images/project.jpg", serveHomeImage)
|
mux.HandleFunc("/images/project.jpg", serveHomeImage)
|
||||||
|
mux.HandleFunc("/images/icon.png", serveIcon)
|
||||||
mux.HandleFunc("/llm", serveLLMInstructions)
|
mux.HandleFunc("/llm", serveLLMInstructions)
|
||||||
|
|
||||||
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -225,59 +229,7 @@ func routes(logger *slog.Logger, cfg *config.Config, info buildinfo.Info, db *st
|
|||||||
_, _ = w.Write([]byte("ready"))
|
_, _ = w.Write([]byte("ready"))
|
||||||
})
|
})
|
||||||
|
|
||||||
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
mux.HandleFunc("/", homeHandler(info, accessTracker, oauthEnabled))
|
||||||
if r.URL.Path != "/" {
|
|
||||||
http.NotFound(w, r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.Method != http.MethodGet && r.Method != http.MethodHead {
|
|
||||||
w.Header().Set("Allow", "GET, HEAD")
|
|
||||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
const homePage = `<!doctype html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
|
||||||
<title>AMCS</title>
|
|
||||||
<style>
|
|
||||||
body { margin: 0; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif; background: #f5f7fb; color: #172033; }
|
|
||||||
main { max-width: 860px; margin: 48px auto; background: #fff; border-radius: 12px; box-shadow: 0 10px 28px rgba(23, 32, 51, 0.12); overflow: hidden; }
|
|
||||||
.content { padding: 28px; }
|
|
||||||
h1 { margin: 0 0 12px 0; font-size: 2rem; }
|
|
||||||
p { margin: 0; line-height: 1.5; color: #334155; }
|
|
||||||
.actions { margin-top: 18px; }
|
|
||||||
.link { display: inline-block; padding: 10px 14px; border-radius: 8px; background: #172033; color: #fff; text-decoration: none; font-weight: 600; }
|
|
||||||
.link:hover { background: #0f172a; }
|
|
||||||
img { display: block; width: 100%; height: auto; }
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<main>
|
|
||||||
<img src="/images/project.jpg" alt="Avelon Memory Crystal project image">
|
|
||||||
<div class="content">
|
|
||||||
<h1>Avelon Memory Crystal Server (AMCS)</h1>
|
|
||||||
<p>AMCS is a memory server that captures, links, and retrieves structured project thoughts for AI assistants using semantic search, summaries, and MCP tools.</p>
|
|
||||||
<div class="actions">
|
|
||||||
<a class="link" href="/llm">LLM Instructions</a>
|
|
||||||
<a class="link" href="/oauth-authorization-server">OAuth Authorization Server</a>
|
|
||||||
<a class="link" href="/healthz">Health Check</a>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</main>
|
|
||||||
</body>
|
|
||||||
</html>`
|
|
||||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
if r.Method == http.MethodHead {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _ = w.Write([]byte(homePage))
|
|
||||||
})
|
|
||||||
|
|
||||||
return observability.Chain(
|
return observability.Chain(
|
||||||
mux,
|
mux,
|
||||||
@@ -342,3 +294,26 @@ func serveHomeImage(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
_, _ = w.Write(homeImage)
|
_, _ = w.Write(homeImage)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func serveIcon(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if iconImage == nil {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Method != http.MethodGet && r.Method != http.MethodHead {
|
||||||
|
w.Header().Set("Allow", "GET, HEAD")
|
||||||
|
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "image/png")
|
||||||
|
w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
if r.Method == http.MethodHead {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _ = w.Write(iconImage)
|
||||||
|
}
|
||||||
|
|||||||
BIN
internal/app/static/icon.png
Normal file
BIN
internal/app/static/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 285 KiB |
@@ -12,6 +12,7 @@ var (
|
|||||||
|
|
||||||
faviconICO = mustReadStaticFile("favicon.ico")
|
faviconICO = mustReadStaticFile("favicon.ico")
|
||||||
homeImage = mustReadStaticFile("avelonmemorycrystal.jpg")
|
homeImage = mustReadStaticFile("avelonmemorycrystal.jpg")
|
||||||
|
iconImage = tryReadStaticFile("icon.png")
|
||||||
)
|
)
|
||||||
|
|
||||||
func mustReadStaticFile(name string) []byte {
|
func mustReadStaticFile(name string) []byte {
|
||||||
@@ -22,3 +23,11 @@ func mustReadStaticFile(name string) []byte {
|
|||||||
|
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func tryReadStaticFile(name string) []byte {
|
||||||
|
data, err := fs.ReadFile(staticFiles, "static/"+name)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|||||||
171
internal/app/status.go
Normal file
171
internal/app/status.go
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"html"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/auth"
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/buildinfo"
|
||||||
|
)
|
||||||
|
|
||||||
|
const connectedWindow = 10 * time.Minute
|
||||||
|
|
||||||
|
type statusPageData struct {
|
||||||
|
Version string
|
||||||
|
BuildDate string
|
||||||
|
Commit string
|
||||||
|
ConnectedCount int
|
||||||
|
TotalKnown int
|
||||||
|
Entries []auth.AccessSnapshot
|
||||||
|
OAuthEnabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func renderHomePage(info buildinfo.Info, tracker *auth.AccessTracker, oauthEnabled bool, now time.Time) string {
|
||||||
|
entries := tracker.Snapshot()
|
||||||
|
data := statusPageData{
|
||||||
|
Version: fallback(info.Version, "dev"),
|
||||||
|
BuildDate: fallback(info.BuildDate, "unknown"),
|
||||||
|
Commit: fallback(info.Commit, "unknown"),
|
||||||
|
ConnectedCount: tracker.ConnectedCount(now, connectedWindow),
|
||||||
|
TotalKnown: len(entries),
|
||||||
|
Entries: entries,
|
||||||
|
OAuthEnabled: oauthEnabled,
|
||||||
|
}
|
||||||
|
|
||||||
|
var b strings.Builder
|
||||||
|
b.WriteString(`<!doctype html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<title>AMCS</title>
|
||||||
|
<style>
|
||||||
|
body { margin: 0; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif; background: #f5f7fb; color: #172033; }
|
||||||
|
main { max-width: 980px; margin: 48px auto; background: #fff; border-radius: 12px; box-shadow: 0 10px 28px rgba(23, 32, 51, 0.12); overflow: hidden; }
|
||||||
|
.content { padding: 28px; }
|
||||||
|
h1, h2 { margin: 0 0 12px 0; }
|
||||||
|
p { margin: 0; line-height: 1.5; color: #334155; }
|
||||||
|
.actions { margin-top: 18px; display: flex; flex-wrap: wrap; gap: 10px; }
|
||||||
|
.link { display: inline-block; padding: 10px 14px; border-radius: 8px; background: #172033; color: #fff; text-decoration: none; font-weight: 600; }
|
||||||
|
.link:hover { background: #0f172a; }
|
||||||
|
.stats { display: grid; grid-template-columns: repeat(auto-fit, minmax(160px, 1fr)); gap: 12px; margin-top: 24px; }
|
||||||
|
.card { background: #eef2ff; border-radius: 10px; padding: 16px; }
|
||||||
|
.label { display: block; font-size: 0.85rem; text-transform: uppercase; letter-spacing: 0.04em; color: #475569; }
|
||||||
|
.value { display: block; margin-top: 6px; font-size: 1.4rem; font-weight: 700; color: #0f172a; }
|
||||||
|
.meta { margin-top: 28px; color: #475569; font-size: 0.95rem; }
|
||||||
|
table { width: 100%; border-collapse: collapse; margin-top: 16px; }
|
||||||
|
th, td { text-align: left; padding: 10px 8px; border-bottom: 1px solid #e2e8f0; vertical-align: top; }
|
||||||
|
th { font-size: 0.85rem; text-transform: uppercase; letter-spacing: 0.04em; color: #475569; }
|
||||||
|
.empty { margin-top: 16px; color: #64748b; }
|
||||||
|
code { font-family: ui-monospace, SFMono-Regular, Menlo, monospace; }
|
||||||
|
img { display: block; width: 100%; height: auto; }
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<main>
|
||||||
|
<img src="/images/project.jpg" alt="Avelon Memory Crystal project image">
|
||||||
|
<div class="content">
|
||||||
|
<h1>Avelon Memory Crystal Server (AMCS)</h1>
|
||||||
|
<p>AMCS is a memory server that captures, links, and retrieves structured project thoughts for AI assistants using semantic search, summaries, and MCP tools.</p>
|
||||||
|
<div class="actions">
|
||||||
|
<a class="link" href="/llm">LLM Instructions</a>
|
||||||
|
<a class="link" href="/healthz">Health Check</a>
|
||||||
|
<a class="link" href="/readyz">Readiness Check</a>`)
|
||||||
|
if data.OAuthEnabled {
|
||||||
|
b.WriteString(`
|
||||||
|
<a class="link" href="/oauth-authorization-server">OAuth Authorization Server</a>`)
|
||||||
|
}
|
||||||
|
b.WriteString(`
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="stats">
|
||||||
|
<div class="card">
|
||||||
|
<span class="label">Connected users</span>
|
||||||
|
<span class="value">` + fmt.Sprintf("%d", data.ConnectedCount) + `</span>
|
||||||
|
</div>
|
||||||
|
<div class="card">
|
||||||
|
<span class="label">Known principals</span>
|
||||||
|
<span class="value">` + fmt.Sprintf("%d", data.TotalKnown) + `</span>
|
||||||
|
</div>
|
||||||
|
<div class="card">
|
||||||
|
<span class="label">Version</span>
|
||||||
|
<span class="value">` + html.EscapeString(data.Version) + `</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="meta">
|
||||||
|
<strong>Build date:</strong> ` + html.EscapeString(data.BuildDate) + ` •
|
||||||
|
<strong>Commit:</strong> <code>` + html.EscapeString(data.Commit) + `</code> •
|
||||||
|
<strong>Connected window:</strong> last 10 minutes
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<h2 style="margin-top: 28px;">Recent access</h2>`)
|
||||||
|
if len(data.Entries) == 0 {
|
||||||
|
b.WriteString(`
|
||||||
|
<p class="empty">No authenticated access recorded yet.</p>`)
|
||||||
|
} else {
|
||||||
|
b.WriteString(`
|
||||||
|
<table>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Principal</th>
|
||||||
|
<th>Last accessed</th>
|
||||||
|
<th>Last path</th>
|
||||||
|
<th>Requests</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>`)
|
||||||
|
for _, entry := range data.Entries {
|
||||||
|
b.WriteString(`
|
||||||
|
<tr>
|
||||||
|
<td><code>` + html.EscapeString(entry.KeyID) + `</code></td>
|
||||||
|
<td>` + html.EscapeString(entry.LastAccessedAt.UTC().Format(time.RFC3339)) + `</td>
|
||||||
|
<td>` + html.EscapeString(entry.LastPath) + `</td>
|
||||||
|
<td>` + fmt.Sprintf("%d", entry.RequestCount) + `</td>
|
||||||
|
</tr>`)
|
||||||
|
}
|
||||||
|
b.WriteString(`
|
||||||
|
</tbody>
|
||||||
|
</table>`)
|
||||||
|
}
|
||||||
|
b.WriteString(`
|
||||||
|
</div>
|
||||||
|
</main>
|
||||||
|
</body>
|
||||||
|
</html>`)
|
||||||
|
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func fallback(value, defaultValue string) string {
|
||||||
|
if strings.TrimSpace(value) == "" {
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func homeHandler(info buildinfo.Info, tracker *auth.AccessTracker, oauthEnabled bool) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if r.URL.Path != "/" {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Method != http.MethodGet && r.Method != http.MethodHead {
|
||||||
|
w.Header().Set("Allow", "GET, HEAD")
|
||||||
|
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
if r.Method == http.MethodHead {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _ = w.Write([]byte(renderHomePage(info, tracker, oauthEnabled, time.Now())))
|
||||||
|
}
|
||||||
|
}
|
||||||
84
internal/app/status_test.go
Normal file
84
internal/app/status_test.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/auth"
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/buildinfo"
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRenderHomePageHidesOAuthLinkWhenDisabled(t *testing.T) {
|
||||||
|
tracker := auth.NewAccessTracker()
|
||||||
|
page := renderHomePage(buildinfo.Info{Version: "v1.2.3", BuildDate: "2026-04-04", Commit: "abc123"}, tracker, false, time.Date(2026, 4, 4, 12, 0, 0, 0, time.UTC))
|
||||||
|
|
||||||
|
if strings.Contains(page, "/oauth-authorization-server") {
|
||||||
|
t.Fatal("page unexpectedly contains OAuth link")
|
||||||
|
}
|
||||||
|
if !strings.Contains(page, "Connected users") {
|
||||||
|
t.Fatal("page missing Connected users stat")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRenderHomePageShowsTrackedAccess(t *testing.T) {
|
||||||
|
tracker := auth.NewAccessTracker()
|
||||||
|
now := time.Date(2026, 4, 4, 12, 0, 0, 0, time.UTC)
|
||||||
|
tracker.Record("client-a", "/files", "127.0.0.1:1234", "tester", now)
|
||||||
|
|
||||||
|
page := renderHomePage(buildinfo.Info{Version: "v1.2.3"}, tracker, true, now)
|
||||||
|
|
||||||
|
for _, needle := range []string{"client-a", "/files", "1</span>", "/oauth-authorization-server"} {
|
||||||
|
if !strings.Contains(page, needle) {
|
||||||
|
t.Fatalf("page missing %q", needle)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHomeHandlerAllowsHead(t *testing.T) {
|
||||||
|
handler := homeHandler(buildinfo.Info{Version: "v1"}, auth.NewAccessTracker(), false)
|
||||||
|
req := httptest.NewRequest(http.MethodHead, "/", nil)
|
||||||
|
rec := httptest.NewRecorder()
|
||||||
|
|
||||||
|
handler.ServeHTTP(rec, req)
|
||||||
|
|
||||||
|
if rec.Code != http.StatusOK {
|
||||||
|
t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK)
|
||||||
|
}
|
||||||
|
if body := rec.Body.String(); body != "" {
|
||||||
|
t.Fatalf("body = %q, want empty for HEAD", body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiddlewareRecordsAuthenticatedAccess(t *testing.T) {
|
||||||
|
keyring, err := auth.NewKeyring([]config.APIKey{{ID: "client-a", Value: "secret"}})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("NewKeyring() error = %v", err)
|
||||||
|
}
|
||||||
|
tracker := auth.NewAccessTracker()
|
||||||
|
logger := slog.New(slog.NewTextHandler(io.Discard, nil))
|
||||||
|
handler := auth.Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, tracker, logger)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
}))
|
||||||
|
|
||||||
|
req := httptest.NewRequest(http.MethodGet, "/files", nil)
|
||||||
|
req.Header.Set("x-brain-key", "secret")
|
||||||
|
rec := httptest.NewRecorder()
|
||||||
|
handler.ServeHTTP(rec, req)
|
||||||
|
|
||||||
|
if rec.Code != http.StatusNoContent {
|
||||||
|
t.Fatalf("status = %d, want %d", rec.Code, http.StatusNoContent)
|
||||||
|
}
|
||||||
|
snap := tracker.Snapshot()
|
||||||
|
if len(snap) != 1 {
|
||||||
|
t.Fatalf("len(snapshot) = %d, want 1", len(snap))
|
||||||
|
}
|
||||||
|
if snap[0].KeyID != "client-a" || snap[0].LastPath != "/files" {
|
||||||
|
t.Fatalf("snapshot[0] = %+v, want keyID client-a and path /files", snap[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
81
internal/auth/access_tracker.go
Normal file
81
internal/auth/access_tracker.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AccessSnapshot struct {
|
||||||
|
KeyID string
|
||||||
|
LastPath string
|
||||||
|
RemoteAddr string
|
||||||
|
UserAgent string
|
||||||
|
RequestCount int
|
||||||
|
LastAccessedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type AccessTracker struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
entries map[string]AccessSnapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAccessTracker() *AccessTracker {
|
||||||
|
return &AccessTracker{entries: make(map[string]AccessSnapshot)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AccessTracker) Record(keyID, path, remoteAddr, userAgent string, now time.Time) {
|
||||||
|
if t == nil || keyID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.mu.Lock()
|
||||||
|
defer t.mu.Unlock()
|
||||||
|
|
||||||
|
entry := t.entries[keyID]
|
||||||
|
entry.KeyID = keyID
|
||||||
|
entry.LastPath = path
|
||||||
|
entry.RemoteAddr = remoteAddr
|
||||||
|
entry.UserAgent = userAgent
|
||||||
|
entry.LastAccessedAt = now.UTC()
|
||||||
|
entry.RequestCount++
|
||||||
|
t.entries[keyID] = entry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AccessTracker) Snapshot() []AccessSnapshot {
|
||||||
|
if t == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t.mu.RLock()
|
||||||
|
defer t.mu.RUnlock()
|
||||||
|
|
||||||
|
items := make([]AccessSnapshot, 0, len(t.entries))
|
||||||
|
for _, entry := range t.entries {
|
||||||
|
items = append(items, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(items, func(i, j int) bool {
|
||||||
|
return items[i].LastAccessedAt.After(items[j].LastAccessedAt)
|
||||||
|
})
|
||||||
|
|
||||||
|
return items
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AccessTracker) ConnectedCount(now time.Time, window time.Duration) int {
|
||||||
|
if t == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
cutoff := now.UTC().Add(-window)
|
||||||
|
t.mu.RLock()
|
||||||
|
defer t.mu.RUnlock()
|
||||||
|
|
||||||
|
count := 0
|
||||||
|
for _, entry := range t.entries {
|
||||||
|
if !entry.LastAccessedAt.Before(cutoff) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
45
internal/auth/access_tracker_test.go
Normal file
45
internal/auth/access_tracker_test.go
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccessTrackerRecordAndSnapshot(t *testing.T) {
|
||||||
|
tracker := NewAccessTracker()
|
||||||
|
older := time.Date(2026, 4, 4, 10, 0, 0, 0, time.UTC)
|
||||||
|
newer := older.Add(2 * time.Minute)
|
||||||
|
|
||||||
|
tracker.Record("client-a", "/files", "10.0.0.1:1234", "agent-a", older)
|
||||||
|
tracker.Record("client-b", "/mcp", "10.0.0.2:1234", "agent-b", newer)
|
||||||
|
tracker.Record("client-a", "/files/1", "10.0.0.1:1234", "agent-a2", newer.Add(30*time.Second))
|
||||||
|
|
||||||
|
snap := tracker.Snapshot()
|
||||||
|
if len(snap) != 2 {
|
||||||
|
t.Fatalf("len(snapshot) = %d, want 2", len(snap))
|
||||||
|
}
|
||||||
|
if snap[0].KeyID != "client-a" {
|
||||||
|
t.Fatalf("snapshot[0].KeyID = %q, want client-a", snap[0].KeyID)
|
||||||
|
}
|
||||||
|
if snap[0].RequestCount != 2 {
|
||||||
|
t.Fatalf("snapshot[0].RequestCount = %d, want 2", snap[0].RequestCount)
|
||||||
|
}
|
||||||
|
if snap[0].LastPath != "/files/1" {
|
||||||
|
t.Fatalf("snapshot[0].LastPath = %q, want /files/1", snap[0].LastPath)
|
||||||
|
}
|
||||||
|
if snap[0].UserAgent != "agent-a2" {
|
||||||
|
t.Fatalf("snapshot[0].UserAgent = %q, want agent-a2", snap[0].UserAgent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccessTrackerConnectedCount(t *testing.T) {
|
||||||
|
tracker := NewAccessTracker()
|
||||||
|
now := time.Date(2026, 4, 4, 12, 0, 0, 0, time.UTC)
|
||||||
|
|
||||||
|
tracker.Record("recent", "/mcp", "", "", now.Add(-2*time.Minute))
|
||||||
|
tracker.Record("stale", "/mcp", "", "", now.Add(-11*time.Minute))
|
||||||
|
|
||||||
|
if got := tracker.ConnectedCount(now, 10*time.Minute); got != 1 {
|
||||||
|
t.Fatalf("ConnectedCount() = %d, want 1", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -39,7 +39,7 @@ func TestMiddlewareAllowsHeaderAuthAndSetsContext(t *testing.T) {
|
|||||||
t.Fatalf("NewKeyring() error = %v", err)
|
t.Fatalf("NewKeyring() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
keyID, ok := KeyIDFromContext(r.Context())
|
keyID, ok := KeyIDFromContext(r.Context())
|
||||||
if !ok || keyID != "client-a" {
|
if !ok || keyID != "client-a" {
|
||||||
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
||||||
@@ -63,7 +63,7 @@ func TestMiddlewareAllowsBearerAuthAndSetsContext(t *testing.T) {
|
|||||||
t.Fatalf("NewKeyring() error = %v", err)
|
t.Fatalf("NewKeyring() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
keyID, ok := KeyIDFromContext(r.Context())
|
keyID, ok := KeyIDFromContext(r.Context())
|
||||||
if !ok || keyID != "client-a" {
|
if !ok || keyID != "client-a" {
|
||||||
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
||||||
@@ -90,7 +90,7 @@ func TestMiddlewarePrefersExplicitHeaderOverBearerAuth(t *testing.T) {
|
|||||||
t.Fatalf("NewKeyring() error = %v", err)
|
t.Fatalf("NewKeyring() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
keyID, ok := KeyIDFromContext(r.Context())
|
keyID, ok := KeyIDFromContext(r.Context())
|
||||||
if !ok || keyID != "client-a" {
|
if !ok || keyID != "client-a" {
|
||||||
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
t.Fatalf("KeyIDFromContext() = (%q, %v), want (client-a, true)", keyID, ok)
|
||||||
@@ -119,7 +119,7 @@ func TestMiddlewareAllowsQueryParamWhenEnabled(t *testing.T) {
|
|||||||
HeaderName: "x-brain-key",
|
HeaderName: "x-brain-key",
|
||||||
QueryParam: "key",
|
QueryParam: "key",
|
||||||
AllowQueryParam: true,
|
AllowQueryParam: true,
|
||||||
}, keyring, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
}, keyring, nil, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
}))
|
}))
|
||||||
|
|
||||||
@@ -138,7 +138,7 @@ func TestMiddlewareRejectsMissingOrInvalidKey(t *testing.T) {
|
|||||||
t.Fatalf("NewKeyring() error = %v", err)
|
t.Fatalf("NewKeyring() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{HeaderName: "x-brain-key"}, keyring, nil, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
t.Fatal("next handler should not be called")
|
t.Fatal("next handler should not be called")
|
||||||
}))
|
}))
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"git.warky.dev/wdevs/amcs/internal/config"
|
"git.warky.dev/wdevs/amcs/internal/config"
|
||||||
)
|
)
|
||||||
@@ -14,11 +15,16 @@ type contextKey string
|
|||||||
|
|
||||||
const keyIDContextKey contextKey = "auth.key_id"
|
const keyIDContextKey contextKey = "auth.key_id"
|
||||||
|
|
||||||
func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthRegistry, tokenStore *TokenStore, log *slog.Logger) func(http.Handler) http.Handler {
|
func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthRegistry, tokenStore *TokenStore, tracker *AccessTracker, log *slog.Logger) func(http.Handler) http.Handler {
|
||||||
headerName := cfg.HeaderName
|
headerName := cfg.HeaderName
|
||||||
if headerName == "" {
|
if headerName == "" {
|
||||||
headerName = "x-brain-key"
|
headerName = "x-brain-key"
|
||||||
}
|
}
|
||||||
|
recordAccess := func(r *http.Request, keyID string) {
|
||||||
|
if tracker != nil {
|
||||||
|
tracker.Record(keyID, r.URL.Path, r.RemoteAddr, r.UserAgent(), time.Now())
|
||||||
|
}
|
||||||
|
}
|
||||||
return func(next http.Handler) http.Handler {
|
return func(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
// 1. Custom header → keyring only.
|
// 1. Custom header → keyring only.
|
||||||
@@ -30,6 +36,7 @@ func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthReg
|
|||||||
http.Error(w, "invalid API key", http.StatusUnauthorized)
|
http.Error(w, "invalid API key", http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
recordAccess(r, keyID)
|
||||||
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -39,12 +46,14 @@ func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthReg
|
|||||||
if bearer := extractBearer(r); bearer != "" {
|
if bearer := extractBearer(r); bearer != "" {
|
||||||
if tokenStore != nil {
|
if tokenStore != nil {
|
||||||
if keyID, ok := tokenStore.Lookup(bearer); ok {
|
if keyID, ok := tokenStore.Lookup(bearer); ok {
|
||||||
|
recordAccess(r, keyID)
|
||||||
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if keyring != nil {
|
if keyring != nil {
|
||||||
if keyID, ok := keyring.Lookup(bearer); ok {
|
if keyID, ok := keyring.Lookup(bearer); ok {
|
||||||
|
recordAccess(r, keyID)
|
||||||
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -66,6 +75,7 @@ func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthReg
|
|||||||
http.Error(w, "invalid OAuth client credentials", http.StatusUnauthorized)
|
http.Error(w, "invalid OAuth client credentials", http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
recordAccess(r, keyID)
|
||||||
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -79,6 +89,7 @@ func Middleware(cfg config.AuthConfig, keyring *Keyring, oauthRegistry *OAuthReg
|
|||||||
http.Error(w, "invalid API key", http.StatusUnauthorized)
|
http.Error(w, "invalid API key", http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
recordAccess(r, keyID)
|
||||||
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), keyIDContextKey, keyID)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ func TestMiddlewareAllowsOAuthBasicAuthAndSetsContext(t *testing.T) {
|
|||||||
t.Fatalf("NewOAuthRegistry() error = %v", err)
|
t.Fatalf("NewOAuthRegistry() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{}, nil, oauthRegistry, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{}, nil, oauthRegistry, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
keyID, ok := KeyIDFromContext(r.Context())
|
keyID, ok := KeyIDFromContext(r.Context())
|
||||||
if !ok || keyID != "oauth-client" {
|
if !ok || keyID != "oauth-client" {
|
||||||
t.Fatalf("KeyIDFromContext() = (%q, %v), want (oauth-client, true)", keyID, ok)
|
t.Fatalf("KeyIDFromContext() = (%q, %v), want (oauth-client, true)", keyID, ok)
|
||||||
@@ -70,7 +70,7 @@ func TestMiddlewareRejectsOAuthMissingOrInvalidCredentials(t *testing.T) {
|
|||||||
t.Fatalf("NewOAuthRegistry() error = %v", err)
|
t.Fatalf("NewOAuthRegistry() error = %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := Middleware(config.AuthConfig{}, nil, oauthRegistry, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := Middleware(config.AuthConfig{}, nil, oauthRegistry, nil, nil, testLogger())(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
t.Fatal("next handler should not be called")
|
t.Fatal("next handler should not be called")
|
||||||
}))
|
}))
|
||||||
|
|
||||||
|
|||||||
@@ -36,6 +36,11 @@ type MCPConfig struct {
|
|||||||
Version string `yaml:"version"`
|
Version string `yaml:"version"`
|
||||||
Transport string `yaml:"transport"`
|
Transport string `yaml:"transport"`
|
||||||
SessionTimeout time.Duration `yaml:"session_timeout"`
|
SessionTimeout time.Duration `yaml:"session_timeout"`
|
||||||
|
// PublicURL is the externally reachable base URL of this server (e.g. https://amcs.example.com).
|
||||||
|
// When set, it is used to build absolute icon URLs in the MCP server identity.
|
||||||
|
PublicURL string `yaml:"public_url"`
|
||||||
|
// Instructions is set at startup from the embedded memory.md and sent to MCP clients on initialise.
|
||||||
|
Instructions string `yaml:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type AuthConfig struct {
|
type AuthConfig struct {
|
||||||
|
|||||||
@@ -117,6 +117,7 @@ func defaultConfig() Config {
|
|||||||
|
|
||||||
func applyEnvOverrides(cfg *Config) {
|
func applyEnvOverrides(cfg *Config) {
|
||||||
overrideString(&cfg.Database.URL, "AMCS_DATABASE_URL")
|
overrideString(&cfg.Database.URL, "AMCS_DATABASE_URL")
|
||||||
|
overrideString(&cfg.MCP.PublicURL, "AMCS_PUBLIC_URL")
|
||||||
overrideString(&cfg.AI.LiteLLM.BaseURL, "AMCS_LITELLM_BASE_URL")
|
overrideString(&cfg.AI.LiteLLM.BaseURL, "AMCS_LITELLM_BASE_URL")
|
||||||
overrideString(&cfg.AI.LiteLLM.APIKey, "AMCS_LITELLM_API_KEY")
|
overrideString(&cfg.AI.LiteLLM.APIKey, "AMCS_LITELLM_API_KEY")
|
||||||
overrideString(&cfg.AI.Ollama.BaseURL, "AMCS_OLLAMA_BASE_URL")
|
overrideString(&cfg.AI.Ollama.BaseURL, "AMCS_OLLAMA_BASE_URL")
|
||||||
|
|||||||
@@ -3,11 +3,18 @@ package mcpserver
|
|||||||
import (
|
import (
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
||||||
"git.warky.dev/wdevs/amcs/internal/config"
|
"git.warky.dev/wdevs/amcs/internal/config"
|
||||||
"git.warky.dev/wdevs/amcs/internal/tools"
|
"git.warky.dev/wdevs/amcs/internal/tools"
|
||||||
|
amcsllm "git.warky.dev/wdevs/amcs/llm"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
serverTitle = "Avalon Memory Crystal Server"
|
||||||
|
serverWebsiteURL = "https://git.warky.dev/wdevs/amcs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ToolSet struct {
|
type ToolSet struct {
|
||||||
@@ -36,13 +43,24 @@ type ToolSet struct {
|
|||||||
CRM *tools.CRMTool
|
CRM *tools.CRMTool
|
||||||
Skills *tools.SkillsTool
|
Skills *tools.SkillsTool
|
||||||
ChatHistory *tools.ChatHistoryTool
|
ChatHistory *tools.ChatHistoryTool
|
||||||
|
Describe *tools.DescribeTool
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(cfg config.MCPConfig, logger *slog.Logger, toolSet ToolSet, onSessionClosed func(string)) (http.Handler, error) {
|
func New(cfg config.MCPConfig, logger *slog.Logger, toolSet ToolSet, onSessionClosed func(string)) (http.Handler, error) {
|
||||||
|
instructions := cfg.Instructions
|
||||||
|
if instructions == "" {
|
||||||
|
instructions = string(amcsllm.MemoryInstructions)
|
||||||
|
}
|
||||||
|
|
||||||
server := mcp.NewServer(&mcp.Implementation{
|
server := mcp.NewServer(&mcp.Implementation{
|
||||||
Name: cfg.ServerName,
|
Name: cfg.ServerName,
|
||||||
Version: cfg.Version,
|
Title: serverTitle,
|
||||||
}, nil)
|
Version: cfg.Version,
|
||||||
|
WebsiteURL: serverWebsiteURL,
|
||||||
|
Icons: buildServerIcons(cfg.PublicURL),
|
||||||
|
}, &mcp.ServerOptions{
|
||||||
|
Instructions: instructions,
|
||||||
|
})
|
||||||
|
|
||||||
for _, register := range []func(*mcp.Server, *slog.Logger, ToolSet) error{
|
for _, register := range []func(*mcp.Server, *slog.Logger, ToolSet) error{
|
||||||
registerSystemTools,
|
registerSystemTools,
|
||||||
@@ -56,6 +74,7 @@ func New(cfg config.MCPConfig, logger *slog.Logger, toolSet ToolSet, onSessionCl
|
|||||||
registerCRMTools,
|
registerCRMTools,
|
||||||
registerSkillTools,
|
registerSkillTools,
|
||||||
registerChatHistoryTools,
|
registerChatHistoryTools,
|
||||||
|
registerDescribeTools,
|
||||||
} {
|
} {
|
||||||
if err := register(server, logger, toolSet); err != nil {
|
if err := register(server, logger, toolSet); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -75,6 +94,18 @@ func New(cfg config.MCPConfig, logger *slog.Logger, toolSet ToolSet, onSessionCl
|
|||||||
}, opts), nil
|
}, opts), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// buildServerIcons returns icon definitions referencing the server's own /images/icon.png endpoint.
|
||||||
|
// Returns nil when publicURL is empty so the icons field is omitted from the MCP identity.
|
||||||
|
func buildServerIcons(publicURL string) []mcp.Icon {
|
||||||
|
if strings.TrimSpace(publicURL) == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
base := strings.TrimRight(publicURL, "/")
|
||||||
|
return []mcp.Icon{
|
||||||
|
{Source: base + "/images/icon.png", MIMEType: "image/png"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func registerSystemTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
func registerSystemTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "get_version_info",
|
Name: "get_version_info",
|
||||||
@@ -88,13 +119,13 @@ func registerSystemTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSe
|
|||||||
func registerThoughtTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
func registerThoughtTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "capture_thought",
|
Name: "capture_thought",
|
||||||
Description: "Store a thought with generated embeddings and extracted metadata.",
|
Description: "Store a thought with generated embeddings and extracted metadata. The thought is saved immediately even if metadata extraction times out; pending thoughts are retried in the background.",
|
||||||
}, toolSet.Capture.Handle); err != nil {
|
}, toolSet.Capture.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "search_thoughts",
|
Name: "search_thoughts",
|
||||||
Description: "Search stored thoughts by semantic similarity.",
|
Description: "Search stored thoughts by semantic similarity. Falls back to Postgres full-text search automatically when no embeddings exist for the active model.",
|
||||||
}, toolSet.Search.Handle); err != nil {
|
}, toolSet.Search.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -136,13 +167,13 @@ func registerThoughtTools(server *mcp.Server, logger *slog.Logger, toolSet ToolS
|
|||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "summarize_thoughts",
|
Name: "summarize_thoughts",
|
||||||
Description: "Summarize a filtered or searched set of thoughts.",
|
Description: "Produce an LLM prose summary of a filtered or searched set of thoughts.",
|
||||||
}, toolSet.Summarize.Handle); err != nil {
|
}, toolSet.Summarize.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "recall_context",
|
Name: "recall_context",
|
||||||
Description: "Recall semantically relevant and recent context.",
|
Description: "Recall semantically relevant and recent context for prompt injection. Combines vector similarity with recency. Falls back to full-text search when no embeddings exist.",
|
||||||
}, toolSet.Recall.Handle); err != nil {
|
}, toolSet.Recall.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -154,7 +185,7 @@ func registerThoughtTools(server *mcp.Server, logger *slog.Logger, toolSet ToolS
|
|||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "related_thoughts",
|
Name: "related_thoughts",
|
||||||
Description: "Retrieve explicit links and semantic neighbors for a thought.",
|
Description: "Retrieve explicit links and semantic neighbours for a thought. Falls back to full-text search when no embeddings exist.",
|
||||||
}, toolSet.Links.Related); err != nil {
|
}, toolSet.Links.Related); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -176,19 +207,19 @@ func registerProjectTools(server *mcp.Server, logger *slog.Logger, toolSet ToolS
|
|||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "set_active_project",
|
Name: "set_active_project",
|
||||||
Description: "Set the active project for the current MCP session. Requires a stateful MCP client that reuses the same session across calls.",
|
Description: "Set the active project for the current MCP session. Requires a stateful MCP client that reuses the same session across calls. If your client does not preserve sessions, pass project explicitly to each tool instead.",
|
||||||
}, toolSet.Projects.SetActive); err != nil {
|
}, toolSet.Projects.SetActive); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "get_active_project",
|
Name: "get_active_project",
|
||||||
Description: "Return the active project for the current MCP session. If your client does not preserve MCP sessions, pass project explicitly to project-scoped tools instead.",
|
Description: "Return the active project for the current MCP session. If your client does not preserve MCP sessions, pass project explicitly to project-scoped tools instead of relying on this.",
|
||||||
}, toolSet.Projects.GetActive); err != nil {
|
}, toolSet.Projects.GetActive); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "get_project_context",
|
Name: "get_project_context",
|
||||||
Description: "Get recent and semantic context for a project. Uses the explicit project when provided, otherwise the active MCP session project.",
|
Description: "Get recent and semantic context for a project. Uses the explicit project when provided, otherwise the active MCP session project. Falls back to full-text search when no embeddings exist.",
|
||||||
}, toolSet.Context.Handle); err != nil {
|
}, toolSet.Context.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -204,19 +235,19 @@ func registerFileTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet)
|
|||||||
|
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "upload_file",
|
Name: "upload_file",
|
||||||
Description: "Stage a file and get an amcs://files/{id} resource URI. Provide content_path (absolute server-side path, no size limit) or content_base64 (≤10 MB). Optionally link immediately with thought_id/project, or omit them and pass the returned URI to save_file later.",
|
Description: "Stage a file and get an amcs://files/{id} resource URI. Use content_path (absolute server-side path, no size limit) for large or binary files, or content_base64 (≤10 MB) for small files. Pass thought_id/project to link immediately, or omit and pass the URI to save_file later.",
|
||||||
}, toolSet.Files.Upload); err != nil {
|
}, toolSet.Files.Upload); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "save_file",
|
Name: "save_file",
|
||||||
Description: "Store a file and optionally link it to a thought. Supply either content_base64 (≤10 MB) or content_uri (amcs://files/{id} from a prior upload_file or POST /files call). For files larger than 10 MB, use upload_file with content_path first.",
|
Description: "Store a file and optionally link it to a thought. Use content_base64 (≤10 MB) for small files, or content_uri (amcs://files/{id} from a prior upload_file) for previously staged files. For files larger than 10 MB, use upload_file with content_path first. If the goal is to retain the artifact, store the file directly instead of reading or summarising it first.",
|
||||||
}, toolSet.Files.Save); err != nil {
|
}, toolSet.Files.Save); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "load_file",
|
Name: "load_file",
|
||||||
Description: "Load a previously stored file by id and return its metadata and base64 content.",
|
Description: "Load a stored file by id. Returns metadata, base64 content, and an embedded MCP binary resource at amcs://files/{id}. Prefer the embedded resource when your client supports it. The id field accepts a bare UUID or full amcs://files/{id} URI.",
|
||||||
}, toolSet.Files.Load); err != nil {
|
}, toolSet.Files.Load); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -232,7 +263,7 @@ func registerFileTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet)
|
|||||||
func registerMaintenanceTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
func registerMaintenanceTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "backfill_embeddings",
|
Name: "backfill_embeddings",
|
||||||
Description: "Generate missing embeddings for stored thoughts using the active embedding model.",
|
Description: "Generate missing embeddings for stored thoughts using the active embedding model. Run this after switching embedding models or importing thoughts that have no vectors.",
|
||||||
}, toolSet.Backfill.Handle); err != nil {
|
}, toolSet.Backfill.Handle); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -492,7 +523,7 @@ func registerSkillTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet
|
|||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "list_project_skills",
|
Name: "list_project_skills",
|
||||||
Description: "List all skills linked to a project. Call this at the start of a project session to load existing agent behaviour instructions before generating new ones. Pass project explicitly when your client does not preserve MCP sessions.",
|
Description: "List all skills linked to a project. Call this at the start of every project session to load agent behaviour instructions before generating new ones. Only create new skills if none are returned. Pass project explicitly when your client does not preserve MCP sessions.",
|
||||||
}, toolSet.Skills.ListProjectSkills); err != nil {
|
}, toolSet.Skills.ListProjectSkills); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -510,7 +541,7 @@ func registerSkillTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet
|
|||||||
}
|
}
|
||||||
if err := addTool(server, logger, &mcp.Tool{
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
Name: "list_project_guardrails",
|
Name: "list_project_guardrails",
|
||||||
Description: "List all guardrails linked to a project. Call this at the start of a project session to load existing agent constraints before generating new ones. Pass project explicitly when your client does not preserve MCP sessions.",
|
Description: "List all guardrails linked to a project. Call this at the start of every project session to load agent constraints before generating new ones. Only create new guardrails if none are returned. Pass project explicitly when your client does not preserve MCP sessions.",
|
||||||
}, toolSet.Skills.ListProjectGuardrails); err != nil {
|
}, toolSet.Skills.ListProjectGuardrails); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -544,3 +575,123 @@ func registerChatHistoryTools(server *mcp.Server, logger *slog.Logger, toolSet T
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func registerDescribeTools(server *mcp.Server, logger *slog.Logger, toolSet ToolSet) error {
|
||||||
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
|
Name: "describe_tools",
|
||||||
|
Description: "Call this first in every session. Returns all available MCP tools with names, descriptions, categories, and your accumulated usage notes. Filter by category to narrow results. Available categories: system, thoughts, projects, files, admin, household, maintenance, calendar, meals, crm, skills, chat, meta.",
|
||||||
|
}, toolSet.Describe.Describe); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := addTool(server, logger, &mcp.Tool{
|
||||||
|
Name: "annotate_tool",
|
||||||
|
Description: "Persist usage notes, gotchas, or workflow patterns for a specific tool. Notes survive across sessions and are returned by describe_tools. Call this whenever you discover something non-obvious about a tool's behaviour. Pass an empty string to clear notes.",
|
||||||
|
}, toolSet.Describe.Annotate); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildToolCatalog returns the static catalog of all registered MCP tools.
|
||||||
|
// Pass this to tools.NewDescribeTool when assembling the ToolSet.
|
||||||
|
func BuildToolCatalog() []tools.ToolEntry {
|
||||||
|
return []tools.ToolEntry{
|
||||||
|
// system
|
||||||
|
{Name: "get_version_info", Description: "Return the server build version information, including version, tag name, commit, and build date.", Category: "system"},
|
||||||
|
|
||||||
|
// thoughts
|
||||||
|
{Name: "capture_thought", Description: "Store a thought with generated embeddings and extracted metadata. The thought is saved immediately even if metadata extraction times out; pending thoughts are retried in the background.", Category: "thoughts"},
|
||||||
|
{Name: "search_thoughts", Description: "Search stored thoughts by semantic similarity. Falls back to Postgres full-text search automatically when no embeddings exist for the active model.", Category: "thoughts"},
|
||||||
|
{Name: "list_thoughts", Description: "List recent thoughts with optional metadata filters.", Category: "thoughts"},
|
||||||
|
{Name: "thought_stats", Description: "Get counts and top metadata buckets across stored thoughts.", Category: "thoughts"},
|
||||||
|
{Name: "get_thought", Description: "Retrieve a full thought by id.", Category: "thoughts"},
|
||||||
|
{Name: "update_thought", Description: "Update thought content or merge metadata.", Category: "thoughts"},
|
||||||
|
{Name: "delete_thought", Description: "Hard-delete a thought by id.", Category: "thoughts"},
|
||||||
|
{Name: "archive_thought", Description: "Archive a thought so it is hidden from default search and listing.", Category: "thoughts"},
|
||||||
|
{Name: "summarize_thoughts", Description: "Produce an LLM prose summary of a filtered or searched set of thoughts.", Category: "thoughts"},
|
||||||
|
{Name: "recall_context", Description: "Recall semantically relevant and recent context for prompt injection. Combines vector similarity with recency. Falls back to full-text search when no embeddings exist.", Category: "thoughts"},
|
||||||
|
{Name: "link_thoughts", Description: "Create a typed relationship between two thoughts.", Category: "thoughts"},
|
||||||
|
{Name: "related_thoughts", Description: "Retrieve explicit links and semantic neighbours for a thought. Falls back to full-text search when no embeddings exist.", Category: "thoughts"},
|
||||||
|
|
||||||
|
// projects
|
||||||
|
{Name: "create_project", Description: "Create a named project container for thoughts.", Category: "projects"},
|
||||||
|
{Name: "list_projects", Description: "List projects and their current thought counts.", Category: "projects"},
|
||||||
|
{Name: "set_active_project", Description: "Set the active project for the current MCP session. Requires a stateful MCP client that reuses the same session across calls. If your client does not preserve sessions, pass project explicitly to each tool instead.", Category: "projects"},
|
||||||
|
{Name: "get_active_project", Description: "Return the active project for the current MCP session. If your client does not preserve MCP sessions, pass project explicitly to project-scoped tools instead of relying on this.", Category: "projects"},
|
||||||
|
{Name: "get_project_context", Description: "Get recent and semantic context for a project. Uses the explicit project when provided, otherwise the active MCP session project. Falls back to full-text search when no embeddings exist.", Category: "projects"},
|
||||||
|
|
||||||
|
// files
|
||||||
|
{Name: "upload_file", Description: "Stage a file and get an amcs://files/{id} resource URI. Use content_path (absolute server-side path, no size limit) for large or binary files, or content_base64 (≤10 MB) for small files. Pass thought_id/project to link immediately, or omit and pass the URI to save_file later.", Category: "files"},
|
||||||
|
{Name: "save_file", Description: "Store a file and optionally link it to a thought. Use content_base64 (≤10 MB) for small files, or content_uri (amcs://files/{id} from a prior upload_file) for previously staged files. For files larger than 10 MB, use upload_file with content_path first. If the goal is to retain the artifact, store the file directly instead of reading or summarising it first.", Category: "files"},
|
||||||
|
{Name: "load_file", Description: "Load a stored file by id. Returns metadata, base64 content, and an embedded MCP binary resource at amcs://files/{id}. Prefer the embedded resource when your client supports it. The id field accepts a bare UUID or full amcs://files/{id} URI.", Category: "files"},
|
||||||
|
{Name: "list_files", Description: "List stored files, optionally filtered by thought, project, or kind.", Category: "files"},
|
||||||
|
|
||||||
|
// admin
|
||||||
|
{Name: "backfill_embeddings", Description: "Generate missing embeddings for stored thoughts using the active embedding model. Run this after switching embedding models or importing thoughts that have no vectors.", Category: "admin"},
|
||||||
|
{Name: "reparse_thought_metadata", Description: "Re-extract and normalize metadata for stored thoughts from their content.", Category: "admin"},
|
||||||
|
{Name: "retry_failed_metadata", Description: "Retry metadata extraction for thoughts still marked pending or failed.", Category: "admin"},
|
||||||
|
|
||||||
|
// household
|
||||||
|
{Name: "add_household_item", Description: "Store a household fact (paint color, appliance details, measurement, document, etc.).", Category: "household"},
|
||||||
|
{Name: "search_household_items", Description: "Search household items by name, category, or location.", Category: "household"},
|
||||||
|
{Name: "get_household_item", Description: "Retrieve a household item by id.", Category: "household"},
|
||||||
|
{Name: "add_vendor", Description: "Add a service provider (plumber, electrician, landscaper, etc.).", Category: "household"},
|
||||||
|
{Name: "list_vendors", Description: "List household service vendors, optionally filtered by service type.", Category: "household"},
|
||||||
|
|
||||||
|
// maintenance
|
||||||
|
{Name: "add_maintenance_task", Description: "Create a recurring or one-time home maintenance task.", Category: "maintenance"},
|
||||||
|
{Name: "log_maintenance", Description: "Log completed maintenance work; automatically updates the task's next due date.", Category: "maintenance"},
|
||||||
|
{Name: "get_upcoming_maintenance", Description: "List maintenance tasks due within the next N days.", Category: "maintenance"},
|
||||||
|
{Name: "search_maintenance_history", Description: "Search the maintenance log by task name, category, or date range.", Category: "maintenance"},
|
||||||
|
|
||||||
|
// calendar
|
||||||
|
{Name: "add_family_member", Description: "Add a family member to the household.", Category: "calendar"},
|
||||||
|
{Name: "list_family_members", Description: "List all family members.", Category: "calendar"},
|
||||||
|
{Name: "add_activity", Description: "Schedule a one-time or recurring family activity.", Category: "calendar"},
|
||||||
|
{Name: "get_week_schedule", Description: "Get all activities scheduled for a given week.", Category: "calendar"},
|
||||||
|
{Name: "search_activities", Description: "Search activities by title, type, or family member.", Category: "calendar"},
|
||||||
|
{Name: "add_important_date", Description: "Track a birthday, anniversary, deadline, or other important date.", Category: "calendar"},
|
||||||
|
{Name: "get_upcoming_dates", Description: "Get important dates coming up in the next N days.", Category: "calendar"},
|
||||||
|
|
||||||
|
// meals
|
||||||
|
{Name: "add_recipe", Description: "Save a recipe with ingredients and instructions.", Category: "meals"},
|
||||||
|
{Name: "search_recipes", Description: "Search recipes by name, cuisine, tags, or ingredient.", Category: "meals"},
|
||||||
|
{Name: "update_recipe", Description: "Update an existing recipe.", Category: "meals"},
|
||||||
|
{Name: "create_meal_plan", Description: "Set the meal plan for a week; replaces any existing plan for that week.", Category: "meals"},
|
||||||
|
{Name: "get_meal_plan", Description: "Get the meal plan for a given week.", Category: "meals"},
|
||||||
|
{Name: "generate_shopping_list", Description: "Auto-generate a shopping list from the meal plan for a given week.", Category: "meals"},
|
||||||
|
|
||||||
|
// crm
|
||||||
|
{Name: "add_professional_contact", Description: "Add a professional contact to the CRM.", Category: "crm"},
|
||||||
|
{Name: "search_contacts", Description: "Search professional contacts by name, company, title, notes, or tags.", Category: "crm"},
|
||||||
|
{Name: "log_interaction", Description: "Log an interaction with a professional contact.", Category: "crm"},
|
||||||
|
{Name: "get_contact_history", Description: "Get full history (interactions and opportunities) for a contact.", Category: "crm"},
|
||||||
|
{Name: "create_opportunity", Description: "Create a deal, project, or opportunity linked to a contact.", Category: "crm"},
|
||||||
|
{Name: "get_follow_ups_due", Description: "List contacts with a follow-up date due within the next N days.", Category: "crm"},
|
||||||
|
{Name: "link_thought_to_contact", Description: "Append a stored thought to a contact's notes.", Category: "crm"},
|
||||||
|
|
||||||
|
// skills
|
||||||
|
{Name: "add_skill", Description: "Store a reusable agent skill (behavioural instruction or capability prompt).", Category: "skills"},
|
||||||
|
{Name: "remove_skill", Description: "Delete an agent skill by id.", Category: "skills"},
|
||||||
|
{Name: "list_skills", Description: "List all agent skills, optionally filtered by tag.", Category: "skills"},
|
||||||
|
{Name: "add_guardrail", Description: "Store a reusable agent guardrail (constraint or safety rule).", Category: "skills"},
|
||||||
|
{Name: "remove_guardrail", Description: "Delete an agent guardrail by id.", Category: "skills"},
|
||||||
|
{Name: "list_guardrails", Description: "List all agent guardrails, optionally filtered by tag or severity.", Category: "skills"},
|
||||||
|
{Name: "add_project_skill", Description: "Link an agent skill to a project. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
{Name: "remove_project_skill", Description: "Unlink an agent skill from a project. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
{Name: "list_project_skills", Description: "List all skills linked to a project. Call this at the start of every project session to load agent behaviour instructions before generating new ones. Only create new skills if none are returned. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
{Name: "add_project_guardrail", Description: "Link an agent guardrail to a project. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
{Name: "remove_project_guardrail", Description: "Unlink an agent guardrail from a project. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
{Name: "list_project_guardrails", Description: "List all guardrails linked to a project. Call this at the start of every project session to load agent constraints before generating new ones. Only create new guardrails if none are returned. Pass project explicitly when your client does not preserve MCP sessions.", Category: "skills"},
|
||||||
|
|
||||||
|
// chat
|
||||||
|
{Name: "save_chat_history", Description: "Save a chat session's message history for later retrieval. Stores messages with optional title, summary, channel, agent, and project metadata.", Category: "chat"},
|
||||||
|
{Name: "get_chat_history", Description: "Retrieve a saved chat history by its UUID or session_id. Returns the full message list.", Category: "chat"},
|
||||||
|
{Name: "list_chat_histories", Description: "List saved chat histories with optional filters: project, channel, agent_id, session_id, or recent days.", Category: "chat"},
|
||||||
|
{Name: "delete_chat_history", Description: "Permanently delete a saved chat history by id.", Category: "chat"},
|
||||||
|
|
||||||
|
// meta
|
||||||
|
{Name: "describe_tools", Description: "Call this first in every session. Returns all available MCP tools with names, descriptions, categories, and your accumulated usage notes. Filter by category to narrow results. Available categories: system, thoughts, projects, files, admin, household, maintenance, calendar, meals, crm, skills, chat, meta.", Category: "meta"},
|
||||||
|
{Name: "annotate_tool", Description: "Persist usage notes, gotchas, or workflow patterns for a specific tool. Notes survive across sessions and are returned by describe_tools. Call this whenever you discover something non-obvious about a tool's behaviour. Pass an empty string to clear notes.", Category: "meta"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -40,15 +40,19 @@ func TestNewListsAllRegisteredTools(t *testing.T) {
|
|||||||
"add_recipe",
|
"add_recipe",
|
||||||
"add_skill",
|
"add_skill",
|
||||||
"add_vendor",
|
"add_vendor",
|
||||||
|
"annotate_tool",
|
||||||
"archive_thought",
|
"archive_thought",
|
||||||
"backfill_embeddings",
|
"backfill_embeddings",
|
||||||
"capture_thought",
|
"capture_thought",
|
||||||
"create_meal_plan",
|
"create_meal_plan",
|
||||||
"create_opportunity",
|
"create_opportunity",
|
||||||
"create_project",
|
"create_project",
|
||||||
|
"delete_chat_history",
|
||||||
"delete_thought",
|
"delete_thought",
|
||||||
|
"describe_tools",
|
||||||
"generate_shopping_list",
|
"generate_shopping_list",
|
||||||
"get_active_project",
|
"get_active_project",
|
||||||
|
"get_chat_history",
|
||||||
"get_contact_history",
|
"get_contact_history",
|
||||||
"get_follow_ups_due",
|
"get_follow_ups_due",
|
||||||
"get_household_item",
|
"get_household_item",
|
||||||
@@ -61,6 +65,7 @@ func TestNewListsAllRegisteredTools(t *testing.T) {
|
|||||||
"get_week_schedule",
|
"get_week_schedule",
|
||||||
"link_thought_to_contact",
|
"link_thought_to_contact",
|
||||||
"link_thoughts",
|
"link_thoughts",
|
||||||
|
"list_chat_histories",
|
||||||
"list_family_members",
|
"list_family_members",
|
||||||
"list_files",
|
"list_files",
|
||||||
"list_guardrails",
|
"list_guardrails",
|
||||||
@@ -81,6 +86,7 @@ func TestNewListsAllRegisteredTools(t *testing.T) {
|
|||||||
"remove_skill",
|
"remove_skill",
|
||||||
"reparse_thought_metadata",
|
"reparse_thought_metadata",
|
||||||
"retry_failed_metadata",
|
"retry_failed_metadata",
|
||||||
|
"save_chat_history",
|
||||||
"save_file",
|
"save_file",
|
||||||
"search_activities",
|
"search_activities",
|
||||||
"search_contacts",
|
"search_contacts",
|
||||||
|
|||||||
38
internal/store/tool_annotations.go
Normal file
38
internal/store/tool_annotations.go
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
package store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (db *DB) UpsertToolAnnotation(ctx context.Context, toolName, notes string) error {
|
||||||
|
_, err := db.pool.Exec(ctx, `
|
||||||
|
insert into tool_annotations (tool_name, notes)
|
||||||
|
values ($1, $2)
|
||||||
|
on conflict (tool_name) do update
|
||||||
|
set notes = excluded.notes,
|
||||||
|
updated_at = now()
|
||||||
|
`, toolName, notes)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("upsert tool annotation: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (db *DB) GetToolAnnotations(ctx context.Context) (map[string]string, error) {
|
||||||
|
rows, err := db.pool.Query(ctx, `select tool_name, notes from tool_annotations`)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("get tool annotations: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
annotations := make(map[string]string)
|
||||||
|
for rows.Next() {
|
||||||
|
var toolName, notes string
|
||||||
|
if err := rows.Scan(&toolName, ¬es); err != nil {
|
||||||
|
return nil, fmt.Errorf("scan tool annotation: %w", err)
|
||||||
|
}
|
||||||
|
annotations[toolName] = notes
|
||||||
|
}
|
||||||
|
return annotations, rows.Err()
|
||||||
|
}
|
||||||
89
internal/tools/describe.go
Normal file
89
internal/tools/describe.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
package tools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/amcs/internal/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ToolEntry describes a single registered MCP tool.
|
||||||
|
type ToolEntry struct {
|
||||||
|
Name string
|
||||||
|
Description string
|
||||||
|
Category string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DescribeTool implements the describe_tools and annotate_tool MCP tools.
|
||||||
|
type DescribeTool struct {
|
||||||
|
store *store.DB
|
||||||
|
catalog []ToolEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDescribeTool(db *store.DB, catalog []ToolEntry) *DescribeTool {
|
||||||
|
return &DescribeTool{store: db, catalog: catalog}
|
||||||
|
}
|
||||||
|
|
||||||
|
// describe_tools
|
||||||
|
|
||||||
|
type DescribeToolsInput struct {
|
||||||
|
Category string `json:"category,omitempty" jsonschema:"filter results to a single category (e.g. thoughts, projects, files, skills, chat, meta)"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AnnotatedToolEntry struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
Category string `json:"category"`
|
||||||
|
Notes string `json:"notes,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DescribeToolsOutput struct {
|
||||||
|
Tools []AnnotatedToolEntry `json:"tools"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DescribeTool) Describe(ctx context.Context, _ *mcp.CallToolRequest, in DescribeToolsInput) (*mcp.CallToolResult, DescribeToolsOutput, error) {
|
||||||
|
annotations, err := t.store.GetToolAnnotations(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, DescribeToolsOutput{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cat := strings.TrimSpace(strings.ToLower(in.Category))
|
||||||
|
|
||||||
|
entries := make([]AnnotatedToolEntry, 0, len(t.catalog))
|
||||||
|
for _, e := range t.catalog {
|
||||||
|
if cat != "" && e.Category != cat {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
entries = append(entries, AnnotatedToolEntry{
|
||||||
|
Name: e.Name,
|
||||||
|
Description: e.Description,
|
||||||
|
Category: e.Category,
|
||||||
|
Notes: annotations[e.Name],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, DescribeToolsOutput{Tools: entries}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// annotate_tool
|
||||||
|
|
||||||
|
type AnnotateToolInput struct {
|
||||||
|
ToolName string `json:"tool_name" jsonschema:"the exact name of the tool to annotate"`
|
||||||
|
Notes string `json:"notes" jsonschema:"your usage notes, reminders, or gotchas for this tool; pass empty string to clear"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AnnotateToolOutput struct {
|
||||||
|
ToolName string `json:"tool_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DescribeTool) Annotate(ctx context.Context, _ *mcp.CallToolRequest, in AnnotateToolInput) (*mcp.CallToolResult, AnnotateToolOutput, error) {
|
||||||
|
if strings.TrimSpace(in.ToolName) == "" {
|
||||||
|
return nil, AnnotateToolOutput{}, errRequiredField("tool_name")
|
||||||
|
}
|
||||||
|
if err := t.store.UpsertToolAnnotation(ctx, in.ToolName, in.Notes); err != nil {
|
||||||
|
return nil, AnnotateToolOutput{}, err
|
||||||
|
}
|
||||||
|
return nil, AnnotateToolOutput{ToolName: in.ToolName}, nil
|
||||||
|
}
|
||||||
@@ -87,6 +87,7 @@ func resolveProject(ctx context.Context, db *store.DB, sessions *session.ActiveP
|
|||||||
Type: mcperrors.TypeProjectNotFound,
|
Type: mcperrors.TypeProjectNotFound,
|
||||||
Field: "project",
|
Field: "project",
|
||||||
Project: projectRef,
|
Project: projectRef,
|
||||||
|
Hint: fmt.Sprintf("project %q does not exist yet; call create_project with name=%q first, then retry", projectRef, projectRef),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,15 +18,30 @@ Use AMCS as memory with two scopes:
|
|||||||
6. If no strong project match exists, you may use global notebook memory with no project.
|
6. If no strong project match exists, you may use global notebook memory with no project.
|
||||||
7. If multiple projects plausibly match, ask the user before reading or writing project memory.
|
7. If multiple projects plausibly match, ask the user before reading or writing project memory.
|
||||||
|
|
||||||
|
## Session Startup
|
||||||
|
|
||||||
|
At the very start of any session with AMCS:
|
||||||
|
|
||||||
|
1. Call `describe_tools` to get the full list of available tools with their categories and any notes you have previously annotated. Read the notes before using a tool — they contain accumulated gotchas, workflow patterns, and field-ordering requirements you have recorded from prior sessions.
|
||||||
|
|
||||||
## Project Session Startup
|
## Project Session Startup
|
||||||
|
|
||||||
At the start of every project session, after setting the active project:
|
After setting the active project:
|
||||||
|
|
||||||
1. Call `list_project_skills` to load any saved agent behaviour instructions for the project.
|
1. Call `list_project_skills` to load any saved agent behaviour instructions for the project.
|
||||||
2. Call `list_project_guardrails` to load any saved agent constraints for the project.
|
2. Call `list_project_guardrails` to load any saved agent constraints for the project.
|
||||||
3. Apply all returned skills and guardrails immediately and for the duration of the session.
|
3. Apply all returned skills and guardrails immediately and for the duration of the session.
|
||||||
4. Only generate or define new skills and guardrails if none are returned. If you do create new ones, store them with `add_skill` or `add_guardrail` and link them to the project with `add_project_skill` or `add_project_guardrail` so they persist for future sessions.
|
4. Only generate or define new skills and guardrails if none are returned. If you do create new ones, store them with `add_skill` or `add_guardrail` and link them to the project with `add_project_skill` or `add_project_guardrail` so they persist for future sessions.
|
||||||
|
|
||||||
|
## Project Does Not Exist Yet
|
||||||
|
|
||||||
|
If any tool returns a `project_not_found` error:
|
||||||
|
|
||||||
|
1. Call `create_project` with the same name you tried to use.
|
||||||
|
2. Immediately retry the original tool call with that project.
|
||||||
|
|
||||||
|
Do not abandon the project scope or retry without a project. The project simply needs to be created first.
|
||||||
|
|
||||||
## Project Memory Rules
|
## Project Memory Rules
|
||||||
|
|
||||||
- Use project memory for code decisions, architecture, TODOs, debugging findings, and context specific to the current repo or workstream.
|
- Use project memory for code decisions, architecture, TODOs, debugging findings, and context specific to the current repo or workstream.
|
||||||
@@ -59,6 +74,16 @@ At the start of every project session, after setting the active project:
|
|||||||
- Do not base64-encode a file to pass it to `save_file` if an `amcs://files/{id}` URI is already available from a prior `upload_file` or HTTP upload.
|
- Do not base64-encode a file to pass it to `save_file` if an `amcs://files/{id}` URI is already available from a prior `upload_file` or HTTP upload.
|
||||||
- When saving, choose the narrowest correct scope: project if project-specific, global if not.
|
- When saving, choose the narrowest correct scope: project if project-specific, global if not.
|
||||||
|
|
||||||
|
## Tool Annotations
|
||||||
|
|
||||||
|
As you learn non-obvious behaviours, gotchas, or workflow patterns for individual tools, persist them with `annotate_tool`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{ "tool_name": "capture_thought", "notes": "Always pass project explicitly — session state is unreliable in this client." }
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes are returned by `describe_tools` in future sessions. Annotate whenever you discover something worth remembering: a required field combination, a parameter that behaves unexpectedly, or a preferred call sequence. Pass an empty string to clear a note.
|
||||||
|
|
||||||
## Skills and Guardrails
|
## Skills and Guardrails
|
||||||
|
|
||||||
- **Skills** are reusable agent behaviour instructions (e.g. output formatting rules, reasoning strategies, workflow conventions).
|
- **Skills** are reusable agent behaviour instructions (e.g. output formatting rules, reasoning strategies, workflow conventions).
|
||||||
@@ -69,4 +94,4 @@ At the start of every project session, after setting the active project:
|
|||||||
|
|
||||||
## Short Operational Form
|
## Short Operational Form
|
||||||
|
|
||||||
Use AMCS memory in project scope when the current work matches a known project. If no clear project matches, global notebook memory is allowed for non-project-specific information. At the start of every project session call `list_project_skills` and `list_project_guardrails` and apply what is returned; only create new skills or guardrails if none exist. If your MCP client does not preserve sessions across calls, pass `project` explicitly instead of relying on `set_active_project`. Store durable notes with `capture_thought`. For binary files or files larger than 10 MB, call `upload_file` with `content_path` to stage the file and get an `amcs://files/{id}` URI, then pass that URI to `save_file` as `content_uri` to link it to a thought. For small files, use `save_file` or `upload_file` with `content_base64` directly. Browse stored files with `list_files`, and load them with `load_file` only when their contents are needed. Stored files can also be read as raw binary via MCP resources at `amcs://files/{id}`. Never store project-specific memory globally when a matching project exists, and never store memory in the wrong project. If project matching is ambiguous, ask the user.
|
At the start of every session, call `describe_tools` to read the full tool list and any accumulated usage notes. Use AMCS memory in project scope when the current work matches a known project; if no clear project matches, global notebook memory is allowed for non-project-specific information. At the start of every project session call `list_project_skills` and `list_project_guardrails` and apply what is returned; only create new skills or guardrails if none exist. If your MCP client does not preserve sessions across calls, pass `project` explicitly instead of relying on `set_active_project`. Store durable notes with `capture_thought`. For binary files or files larger than 10 MB, call `upload_file` with `content_path` to stage the file and get an `amcs://files/{id}` URI, then pass that URI to `save_file` as `content_uri` to link it to a thought. For small files, use `save_file` or `upload_file` with `content_base64` directly. Browse stored files with `list_files`, and load them with `load_file` only when their contents are needed. Stored files can also be read as raw binary via MCP resources at `amcs://files/{id}`. Never store project-specific memory globally when a matching project exists, and never store memory in the wrong project. If project matching is ambiguous, ask the user. If a tool returns `project_not_found`, call `create_project` with that name and retry — never drop the project scope. Whenever you discover a non-obvious tool behaviour, gotcha, or workflow pattern, record it with `annotate_tool` so future sessions benefit.
|
||||||
|
|||||||
1901
llm/plan.md
1901
llm/plan.md
File diff suppressed because it is too large
Load Diff
500
llm/todo.md
500
llm/todo.md
@@ -1,450 +1,126 @@
|
|||||||
# AMCS TODO
|
# AMCS TODO
|
||||||
## Auto Embedding Backfill Tool
|
## Embedding Backfill and Text-Search Fallback Audit
|
||||||
|
|
||||||
## Objective
|
This file originally described the planned `backfill_embeddings` work and semantic-to-text fallback behavior. Most of that work is now implemented. This document now tracks what landed, what still needs verification, and what follow-up work remains.
|
||||||
|
|
||||||
Add an MCP tool that automatically backfills missing embeddings for existing thoughts so semantic search keeps working after:
|
For current operator-facing behavior, prefer `README.md`.
|
||||||
|
|
||||||
* embedding model changes
|
|
||||||
* earlier capture or update failures
|
|
||||||
* import or migration of raw thoughts without vectors
|
|
||||||
|
|
||||||
The tool should be safe to run repeatedly, should not duplicate work, and should make it easy to restore semantic coverage without rewriting existing thoughts.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Desired outcome
|
## Status summary
|
||||||
|
|
||||||
After this work:
|
### Implemented
|
||||||
|
|
||||||
* raw thought text remains the source of truth
|
The main work described in this file is already present in the repo:
|
||||||
* embeddings are treated as derived data per model
|
|
||||||
* search continues to query only embeddings from the active embedding model
|
- `backfill_embeddings` MCP tool exists
|
||||||
* when no embeddings exist for the active model and scope, search falls back to Postgres text search
|
- missing-embedding selection helpers exist in the store layer
|
||||||
* operators or MCP clients can trigger a backfill for the current model
|
- embedding upsert helpers exist in the store layer
|
||||||
* AMCS can optionally auto-run a limited backfill pass on startup or on a schedule later
|
- semantic retrieval falls back to Postgres full-text search when the active model has no embeddings in scope
|
||||||
|
- fallback behavior is wired into the main query-driven tools
|
||||||
|
- a full-text index migration exists
|
||||||
|
- optional automatic backfill runner exists in config/startup flow
|
||||||
|
- retry and reparse maintenance tooling also exists around metadata quality
|
||||||
|
|
||||||
|
### Still worth checking or improving
|
||||||
|
|
||||||
|
The broad feature is done, but some implementation-depth items are still worth tracking:
|
||||||
|
|
||||||
|
- test coverage around fallback/backfill behavior
|
||||||
|
- whether configured backfill batching is used consistently end-to-end
|
||||||
|
- observability depth beyond logs
|
||||||
|
- response visibility into which retrieval mode was used
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Why this is needed
|
## What is already implemented
|
||||||
|
|
||||||
Current search behavior is model-specific:
|
### Backfill tool
|
||||||
|
|
||||||
* query text is embedded with the configured provider model
|
Implemented:
|
||||||
* results are filtered by `embeddings.model`
|
|
||||||
* thoughts with no embedding for that model are invisible to semantic search
|
|
||||||
|
|
||||||
This means a model switch leaves old thoughts searchable only by listing and metadata filters until new embeddings are generated.
|
- `backfill_embeddings`
|
||||||
|
- project scoping
|
||||||
|
- archived-thought filtering
|
||||||
|
- age filtering
|
||||||
|
- dry-run mode
|
||||||
|
- bounded concurrency
|
||||||
|
- best-effort per-item failure handling
|
||||||
|
- idempotent embedding upsert behavior
|
||||||
|
|
||||||
To avoid that dead zone, AMCS should also support a lexical fallback path backed by native Postgres text-search indexing.
|
### Search fallback
|
||||||
|
|
||||||
|
Implemented:
|
||||||
|
|
||||||
|
- full-text fallback when no embeddings exist for the active model in scope
|
||||||
|
- fallback helper shared by query-based tools
|
||||||
|
- full-text index migration on thought content
|
||||||
|
|
||||||
|
### Tools using fallback
|
||||||
|
|
||||||
|
Implemented fallback coverage for:
|
||||||
|
|
||||||
|
- `search_thoughts`
|
||||||
|
- `recall_context`
|
||||||
|
- `get_project_context` when a query is provided
|
||||||
|
- `summarize_thoughts` when a query is provided
|
||||||
|
- semantic neighbors in `related_thoughts`
|
||||||
|
|
||||||
|
### Optional automatic behavior
|
||||||
|
|
||||||
|
Implemented:
|
||||||
|
|
||||||
|
- config-gated startup backfill pass
|
||||||
|
- config-gated periodic backfill loop
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Tool proposal
|
## Remaining follow-ups
|
||||||
|
|
||||||
### New MCP tool
|
### 1. Expose retrieval mode in responses
|
||||||
|
|
||||||
`backfill_embeddings`
|
Still outstanding.
|
||||||
|
|
||||||
Purpose:
|
Why it matters:
|
||||||
|
- callers currently benefit from fallback automatically
|
||||||
|
- but debugging is easier if responses explicitly say whether retrieval was `semantic` or `text`
|
||||||
|
|
||||||
* find thoughts missing an embedding for the active model
|
Suggested shape:
|
||||||
* generate embeddings in batches
|
- add a machine-readable field such as `retrieval_mode: semantic|text`
|
||||||
* write embeddings with upsert semantics
|
- keep it consistent across all query-based tools that use shared retrieval logic
|
||||||
* report counts for scanned, embedded, skipped, and failed thoughts
|
|
||||||
|
|
||||||
### Input
|
### 2. Verify and improve tests
|
||||||
|
|
||||||
```json
|
Still worth auditing.
|
||||||
{
|
|
||||||
"project": "optional project name or id",
|
|
||||||
"limit": 100,
|
|
||||||
"batch_size": 20,
|
|
||||||
"include_archived": false,
|
|
||||||
"older_than_days": 0,
|
|
||||||
"dry_run": false
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Notes:
|
Recommended checks:
|
||||||
|
- no-embedding scope falls back to text search
|
||||||
|
- project-scoped fallback only searches within project scope
|
||||||
|
- archived thoughts remain excluded by default
|
||||||
|
- `related_thoughts` falls back correctly when semantic vectors are unavailable
|
||||||
|
- backfill creates embeddings that later restore semantic search
|
||||||
|
|
||||||
* `project` scopes the backfill to a project when desired
|
### 3. Re-embedding / migration ergonomics
|
||||||
* `limit` caps total thoughts processed in one tool call
|
|
||||||
* `batch_size` controls provider load
|
|
||||||
* `include_archived` defaults to `false`
|
|
||||||
* `older_than_days` is optional and mainly useful to avoid racing with fresh writes
|
|
||||||
* `dry_run` returns counts and sample IDs without calling the embedding provider
|
|
||||||
|
|
||||||
### Output
|
Still optional future work.
|
||||||
|
|
||||||
```json
|
Potential additions:
|
||||||
{
|
- count missing embeddings by project
|
||||||
"model": "openai/text-embedding-3-small",
|
- add `missing_embeddings` stats to `thought_stats`
|
||||||
"scanned": 100,
|
- add a controlled re-embed or reindex flow for model migrations
|
||||||
"embedded": 87,
|
|
||||||
"skipped": 13,
|
|
||||||
"failed": 0,
|
|
||||||
"dry_run": false,
|
|
||||||
"failures": []
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Optional:
|
|
||||||
|
|
||||||
* include a short `next_cursor` later if we add cursor-based paging
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Backfill behavior
|
## Notes for maintainers
|
||||||
|
|
||||||
### Core rules
|
Do not read this file as an untouched future roadmap item anymore. The repo has already implemented the core work described here.
|
||||||
|
|
||||||
* Backfill only when a thought is missing an embedding row for the active model.
|
If more backfill/fallback work is planned, append it as concrete follow-ups against the current codebase rather than preserving the old speculative rollout order.
|
||||||
* Do not recompute embeddings that already exist for that model unless an explicit future `force` flag is added.
|
|
||||||
* Keep embeddings per model side by side in the existing `embeddings` table.
|
|
||||||
* Use `insert ... on conflict (thought_id, model) do update` so retries stay idempotent.
|
|
||||||
|
|
||||||
### Selection query
|
|
||||||
|
|
||||||
Add a store query that returns thoughts where no embedding exists for the requested model.
|
|
||||||
|
|
||||||
Shape:
|
|
||||||
|
|
||||||
* from `thoughts t`
|
|
||||||
* left join `embeddings e on e.thought_id = t.guid and e.model = $model`
|
|
||||||
* filter `e.id is null`
|
|
||||||
* optional filters for project, archived state, age
|
|
||||||
* order by `t.created_at asc`
|
|
||||||
* limit by requested batch
|
|
||||||
|
|
||||||
Ordering oldest first is useful because it steadily restores long-tail recall instead of repeatedly revisiting recent writes.
|
|
||||||
|
|
||||||
### Processing loop
|
|
||||||
|
|
||||||
For each selected thought:
|
|
||||||
|
|
||||||
1. read `content`
|
|
||||||
2. call `provider.Embed(content)`
|
|
||||||
3. upsert embedding row for `thought_id + model`
|
|
||||||
4. continue on per-item failure and collect errors
|
|
||||||
|
|
||||||
Use bounded concurrency instead of fully serial processing so large backfills complete in reasonable time without overwhelming the provider.
|
|
||||||
|
|
||||||
Recommended first pass:
|
|
||||||
|
|
||||||
* one tool invocation handles batches internally
|
|
||||||
* concurrency defaults to a small fixed number like `4`
|
|
||||||
* `batch_size` and concurrency are kept server-side defaults at first, even if only `limit` is exposed in MCP input
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Search fallback behavior
|
## Historical note
|
||||||
|
|
||||||
### Goal
|
The original long-form proposal was replaced during the repo audit because it described work that is now largely complete and was causing drift between the issue tracker and the documentation.
|
||||||
|
|
||||||
If semantic retrieval cannot run because no embeddings exist for the active model in the selected scope, AMCS should fall back to Postgres text search instead of returning empty semantic results by default.
|
If needed, recover the older version from git history.
|
||||||
|
|
||||||
### Fallback rules
|
|
||||||
|
|
||||||
* If embeddings exist for the active model, keep using vector search as the primary path.
|
|
||||||
* If no embeddings exist for the active model in scope, run Postgres text search against raw thought content.
|
|
||||||
* Fallback should apply to:
|
|
||||||
|
|
||||||
* `search_thoughts`
|
|
||||||
* `recall_context`
|
|
||||||
* `get_project_context` when `query` is provided
|
|
||||||
* `summarize_thoughts` when `query` is provided
|
|
||||||
* semantic neighbors in `related_thoughts`
|
|
||||||
|
|
||||||
* Fallback should not mutate data. It is retrieval-only.
|
|
||||||
* Backfill remains the long-term fix; text search is the immediate safety net.
|
|
||||||
|
|
||||||
### Postgres search approach
|
|
||||||
|
|
||||||
Add a native full-text index on thought content and query it with a matching text-search configuration.
|
|
||||||
|
|
||||||
Recommended first pass:
|
|
||||||
|
|
||||||
* add a migration creating a GIN index on `to_tsvector('simple', content)`
|
|
||||||
* use `websearch_to_tsquery('simple', $query)` for user-entered text
|
|
||||||
* rank results with `ts_rank_cd(...)`
|
|
||||||
* continue excluding archived thoughts by default
|
|
||||||
* continue honoring project scope
|
|
||||||
|
|
||||||
Using the `simple` configuration is a safer default for mixed prose, identifiers, and code-like text than a language-specific stemmer.
|
|
||||||
|
|
||||||
### Store additions for fallback
|
|
||||||
|
|
||||||
Add store methods such as:
|
|
||||||
|
|
||||||
* `HasEmbeddingsForModel(ctx, model string, projectID *uuid.UUID) (bool, error)`
|
|
||||||
* `SearchThoughtsText(ctx, query string, limit int, projectID *uuid.UUID, excludeID *uuid.UUID) ([]SearchResult, error)`
|
|
||||||
|
|
||||||
These should be used by a shared retrieval helper in `internal/tools` so semantic callers degrade consistently.
|
|
||||||
|
|
||||||
### Notes on ranking
|
|
||||||
|
|
||||||
Text-search scores will not be directly comparable to vector similarity scores.
|
|
||||||
|
|
||||||
That is acceptable in v1 because:
|
|
||||||
|
|
||||||
* each request will use one retrieval mode at a time
|
|
||||||
* fallback is only used when semantic search is unavailable
|
|
||||||
* response payloads can continue to return `similarity` as a generic relevance score
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Auto behavior
|
|
||||||
|
|
||||||
The user asked for an auto backfill tool, so define two layers:
|
|
||||||
|
|
||||||
### Layer 1: explicit MCP tool
|
|
||||||
|
|
||||||
Ship `backfill_embeddings` first.
|
|
||||||
|
|
||||||
This is the lowest-risk path because:
|
|
||||||
|
|
||||||
* it is observable
|
|
||||||
* it is rate-limited by the caller
|
|
||||||
* it avoids surprise provider cost on startup
|
|
||||||
|
|
||||||
### Layer 2: optional automatic runner
|
|
||||||
|
|
||||||
Add a config-gated background runner after the tool exists and is proven stable.
|
|
||||||
|
|
||||||
Config sketch:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
backfill:
|
|
||||||
enabled: false
|
|
||||||
run_on_startup: false
|
|
||||||
interval: "15m"
|
|
||||||
batch_size: 20
|
|
||||||
max_per_run: 100
|
|
||||||
include_archived: false
|
|
||||||
```
|
|
||||||
|
|
||||||
Behavior:
|
|
||||||
|
|
||||||
* on startup, if enabled and `run_on_startup=true`, run a small bounded backfill pass
|
|
||||||
* if `interval` is set, periodically backfill missing embeddings for the active configured model
|
|
||||||
* log counts and failures, but never block server startup on backfill failure
|
|
||||||
|
|
||||||
This keeps the first implementation simple while still giving us a clean path to true automation.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Store changes
|
|
||||||
|
|
||||||
Add store methods focused on missing-model coverage.
|
|
||||||
|
|
||||||
### New methods
|
|
||||||
|
|
||||||
* `ListThoughtsMissingEmbedding(ctx, model string, limit int, projectID *uuid.UUID, includeArchived bool, olderThanDays int) ([]Thought, error)`
|
|
||||||
* `UpsertEmbedding(ctx, thoughtID uuid.UUID, model string, embedding []float32) error`
|
|
||||||
|
|
||||||
### Optional later methods
|
|
||||||
|
|
||||||
* `CountThoughtsMissingEmbedding(ctx, model string, projectID *uuid.UUID, includeArchived bool) (int, error)`
|
|
||||||
* `ListThoughtIDsMissingEmbeddingPage(...)` for cursor-based paging on large datasets
|
|
||||||
|
|
||||||
### Why separate `UpsertEmbedding`
|
|
||||||
|
|
||||||
`InsertThought` and `UpdateThought` already contain embedding upsert logic, but a dedicated helper will:
|
|
||||||
|
|
||||||
* reduce duplication
|
|
||||||
* let backfill avoid full thought updates
|
|
||||||
* make future re-embedding jobs cleaner
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Tooling changes
|
|
||||||
|
|
||||||
### New file
|
|
||||||
|
|
||||||
`internal/tools/backfill.go`
|
|
||||||
|
|
||||||
Responsibilities:
|
|
||||||
|
|
||||||
* parse input
|
|
||||||
* resolve project if provided
|
|
||||||
* select missing thoughts
|
|
||||||
* run bounded embedding generation
|
|
||||||
* record per-item failures without aborting the whole batch
|
|
||||||
* return summary counts
|
|
||||||
|
|
||||||
### MCP registration
|
|
||||||
|
|
||||||
Add the tool to:
|
|
||||||
|
|
||||||
* `internal/mcpserver/server.go`
|
|
||||||
* `internal/mcpserver/schema.go` and tests if needed
|
|
||||||
* `internal/app/app.go` wiring
|
|
||||||
|
|
||||||
Suggested tool description:
|
|
||||||
|
|
||||||
* `Generate missing embeddings for stored thoughts using the active embedding model.`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Config changes
|
|
||||||
|
|
||||||
No config is required for the first manual tool beyond the existing embedding provider settings.
|
|
||||||
|
|
||||||
For the later automatic runner, add:
|
|
||||||
|
|
||||||
* `backfill.enabled`
|
|
||||||
* `backfill.run_on_startup`
|
|
||||||
* `backfill.interval`
|
|
||||||
* `backfill.batch_size`
|
|
||||||
* `backfill.max_per_run`
|
|
||||||
* `backfill.include_archived`
|
|
||||||
|
|
||||||
Validation rules:
|
|
||||||
|
|
||||||
* `batch_size > 0`
|
|
||||||
* `max_per_run >= batch_size`
|
|
||||||
* `interval` must parse when provided
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Failure handling
|
|
||||||
|
|
||||||
The backfill tool should be best-effort, not all-or-nothing.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
|
|
||||||
* one thought failure does not abort the full run
|
|
||||||
* provider errors are captured and counted
|
|
||||||
* database upsert failures are captured and counted
|
|
||||||
* final tool response includes truncated failure details
|
|
||||||
* full details go to logs
|
|
||||||
|
|
||||||
Failure payloads should avoid returning raw thought content to the caller if that would create noisy or sensitive responses. Prefer thought IDs plus short error strings.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Observability
|
|
||||||
|
|
||||||
Add structured logs for:
|
|
||||||
|
|
||||||
* selected model
|
|
||||||
* project scope
|
|
||||||
* scan count
|
|
||||||
* success count
|
|
||||||
* failure count
|
|
||||||
* duration
|
|
||||||
|
|
||||||
Later, metrics can include:
|
|
||||||
|
|
||||||
* `amcs_backfill_runs_total`
|
|
||||||
* `amcs_backfill_embeddings_total`
|
|
||||||
* `amcs_backfill_failures_total`
|
|
||||||
* `amcs_thoughts_missing_embeddings`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Concurrency and rate limiting
|
|
||||||
|
|
||||||
Keep the first version conservative.
|
|
||||||
|
|
||||||
Plan:
|
|
||||||
|
|
||||||
* use a worker pool with a small fixed concurrency
|
|
||||||
* keep batch sizes small by default
|
|
||||||
* stop fetching new work once `limit` is reached
|
|
||||||
* respect `ctx` cancellation so long backfills can be interrupted cleanly
|
|
||||||
|
|
||||||
Do not add provider-specific rate-limit logic in v1 unless real failures show it is needed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Security and safety
|
|
||||||
|
|
||||||
* Reuse existing MCP auth.
|
|
||||||
* Do not expose a broad `force=true` option in v1.
|
|
||||||
* Default to non-archived thoughts only.
|
|
||||||
* Do not mutate raw thought text or metadata during backfill.
|
|
||||||
* Treat embeddings as derived data that may be regenerated safely.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Testing plan
|
|
||||||
|
|
||||||
### Store tests
|
|
||||||
|
|
||||||
Add tests for:
|
|
||||||
|
|
||||||
* listing thoughts missing embeddings for a model
|
|
||||||
* project-scoped missing-embedding queries
|
|
||||||
* archived thought filtering
|
|
||||||
* idempotent upsert behavior
|
|
||||||
|
|
||||||
### Tool tests
|
|
||||||
|
|
||||||
Add tests for:
|
|
||||||
|
|
||||||
* dry-run mode
|
|
||||||
* successful batch embedding
|
|
||||||
* partial provider failures
|
|
||||||
* empty result set
|
|
||||||
* project resolution
|
|
||||||
* context cancellation
|
|
||||||
|
|
||||||
### Integration tests
|
|
||||||
|
|
||||||
Add a flow covering:
|
|
||||||
|
|
||||||
1. create thoughts without embeddings for a target model
|
|
||||||
2. run `backfill_embeddings`
|
|
||||||
3. confirm rows exist in `embeddings`
|
|
||||||
4. confirm `search_thoughts` can now retrieve them when using that model
|
|
||||||
|
|
||||||
### Fallback search tests
|
|
||||||
|
|
||||||
Add coverage for:
|
|
||||||
|
|
||||||
* no embeddings for model -> `search_thoughts` uses Postgres text search
|
|
||||||
* project-scoped queries only search matching project thoughts
|
|
||||||
* archived thoughts stay excluded by default
|
|
||||||
* `related_thoughts` falls back to text search neighbors when semantic vectors are unavailable
|
|
||||||
* once embeddings exist, semantic search remains the primary path
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Rollout order
|
|
||||||
|
|
||||||
1. Add store helpers for missing-embedding selection and embedding upsert.
|
|
||||||
2. Add Postgres full-text index migration and text-search store helpers.
|
|
||||||
3. Add shared semantic-or-text fallback retrieval logic for query-based tools.
|
|
||||||
4. Add `backfill_embeddings` MCP tool and wire it into the server.
|
|
||||||
5. Add unit and integration tests.
|
|
||||||
6. Document usage in `README.md`.
|
|
||||||
7. Add optional background auto-runner behind config.
|
|
||||||
8. Consider a future `force` or `reindex_model` path only after v1 is stable.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Open questions
|
|
||||||
|
|
||||||
* Should the tool expose `batch_size` to clients, or should batching stay internal?
|
|
||||||
* Should the first version support only the active model, or allow a `model` override for admins?
|
|
||||||
* Should archived thoughts be backfilled by default during startup jobs but not MCP calls?
|
|
||||||
* Do we want a separate CLI/admin command for large one-time reindex jobs outside MCP?
|
|
||||||
|
|
||||||
Recommended answers for v1:
|
|
||||||
|
|
||||||
* keep batching mostly internal
|
|
||||||
* use only the active configured model
|
|
||||||
* exclude archived thoughts by default everywhere
|
|
||||||
* postpone a dedicated CLI until volume justifies it
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Nice follow-ups
|
|
||||||
|
|
||||||
* add a `missing_embeddings` stat to `thought_stats`
|
|
||||||
* expose a read-only tool for counting missing embeddings by project
|
|
||||||
* add a re-embed path for migrating from one model to another in controlled waves
|
|
||||||
* add metadata extraction backfill as a separate job if imported content often lacks metadata
|
|
||||||
* expose the retrieval mode in responses for easier debugging of semantic vs text fallback
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ CREATE TABLE IF NOT EXISTS chat_histories (
|
|||||||
title TEXT,
|
title TEXT,
|
||||||
channel TEXT,
|
channel TEXT,
|
||||||
agent_id TEXT,
|
agent_id TEXT,
|
||||||
project_id UUID REFERENCES projects(id) ON DELETE SET NULL,
|
project_id UUID REFERENCES projects(guid) ON DELETE SET NULL,
|
||||||
messages JSONB NOT NULL DEFAULT '[]',
|
messages JSONB NOT NULL DEFAULT '[]',
|
||||||
summary TEXT,
|
summary TEXT,
|
||||||
metadata JSONB NOT NULL DEFAULT '{}',
|
metadata JSONB NOT NULL DEFAULT '{}',
|
||||||
|
|||||||
14
migrations/019_tool_annotations.sql
Normal file
14
migrations/019_tool_annotations.sql
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
-- Migration: 019_tool_annotations
|
||||||
|
-- Adds a table for model-authored usage notes per tool.
|
||||||
|
|
||||||
|
create table if not exists tool_annotations (
|
||||||
|
id bigserial primary key,
|
||||||
|
tool_name text not null,
|
||||||
|
notes text not null default '',
|
||||||
|
created_at timestamptz not null default now(),
|
||||||
|
updated_at timestamptz not null default now(),
|
||||||
|
constraint tool_annotations_tool_name_unique unique (tool_name)
|
||||||
|
);
|
||||||
|
|
||||||
|
grant all on table public.tool_annotations to amcs;
|
||||||
|
grant usage, select on sequence tool_annotations_id_seq to amcs;
|
||||||
3996
migrations/020_generated_schema.sql
Normal file
3996
migrations/020_generated_schema.sql
Normal file
File diff suppressed because it is too large
Load Diff
35
schema/README.md
Normal file
35
schema/README.md
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# Schema workflow
|
||||||
|
|
||||||
|
The `schema/*.dbml` files are the database schema source of truth.
|
||||||
|
|
||||||
|
## Generate SQL migrations
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make generate-migrations
|
||||||
|
```
|
||||||
|
|
||||||
|
This uses `relspec` to convert the DBML files into PostgreSQL SQL and writes the generated schema migration to:
|
||||||
|
|
||||||
|
- `migrations/020_generated_schema.sql`
|
||||||
|
|
||||||
|
## Check schema drift
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make check-schema-drift
|
||||||
|
```
|
||||||
|
|
||||||
|
This regenerates the SQL from `schema/*.dbml` and compares it with `migrations/020_generated_schema.sql`.
|
||||||
|
If the generated output differs, the command fails so CI can catch schema drift.
|
||||||
|
|
||||||
|
## Workflow
|
||||||
|
|
||||||
|
1. Update the DBML files in `schema/`
|
||||||
|
2. Run `make generate-migrations`
|
||||||
|
3. Review the generated SQL
|
||||||
|
4. Commit both the DBML changes and the generated migration
|
||||||
|
|
||||||
|
Existing handwritten migrations stay in place. Going forward, update the DBML first and regenerate the SQL from there.
|
||||||
44
schema/calendar.dbml
Normal file
44
schema/calendar.dbml
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
Table family_members {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
relationship text
|
||||||
|
birth_date date
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
}
|
||||||
|
|
||||||
|
Table activities {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
family_member_id uuid [ref: > family_members.id]
|
||||||
|
title text [not null]
|
||||||
|
activity_type text
|
||||||
|
day_of_week text
|
||||||
|
start_time time
|
||||||
|
end_time time
|
||||||
|
start_date date
|
||||||
|
end_date date
|
||||||
|
location text
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
day_of_week
|
||||||
|
family_member_id
|
||||||
|
(start_date, end_date)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table important_dates {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
family_member_id uuid [ref: > family_members.id]
|
||||||
|
title text [not null]
|
||||||
|
date_value date [not null]
|
||||||
|
recurring_yearly boolean [not null, default: false]
|
||||||
|
reminder_days_before int [not null, default: 7]
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
date_value
|
||||||
|
}
|
||||||
|
}
|
||||||
48
schema/core.dbml
Normal file
48
schema/core.dbml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
Table thoughts {
|
||||||
|
id bigserial [pk]
|
||||||
|
guid uuid [unique, not null, default: `gen_random_uuid()`]
|
||||||
|
content text [not null]
|
||||||
|
metadata jsonb [default: `'{}'::jsonb`]
|
||||||
|
created_at timestamptz [default: `now()`]
|
||||||
|
updated_at timestamptz [default: `now()`]
|
||||||
|
project_id uuid [ref: > projects.guid]
|
||||||
|
archived_at timestamptz
|
||||||
|
}
|
||||||
|
|
||||||
|
Table projects {
|
||||||
|
id bigserial [pk]
|
||||||
|
guid uuid [unique, not null, default: `gen_random_uuid()`]
|
||||||
|
name text [unique, not null]
|
||||||
|
description text
|
||||||
|
created_at timestamptz [default: `now()`]
|
||||||
|
last_active_at timestamptz [default: `now()`]
|
||||||
|
}
|
||||||
|
|
||||||
|
Table thought_links {
|
||||||
|
from_id bigint [not null, ref: > thoughts.id]
|
||||||
|
to_id bigint [not null, ref: > thoughts.id]
|
||||||
|
relation text [not null]
|
||||||
|
created_at timestamptz [default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(from_id, to_id, relation) [pk]
|
||||||
|
from_id
|
||||||
|
to_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table embeddings {
|
||||||
|
id bigserial [pk]
|
||||||
|
guid uuid [unique, not null, default: `gen_random_uuid()`]
|
||||||
|
thought_id uuid [not null, ref: > thoughts.guid]
|
||||||
|
model text [not null]
|
||||||
|
dim int [not null]
|
||||||
|
embedding vector [not null]
|
||||||
|
created_at timestamptz [default: `now()`]
|
||||||
|
updated_at timestamptz [default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(thought_id, model) [unique]
|
||||||
|
thought_id
|
||||||
|
}
|
||||||
|
}
|
||||||
53
schema/crm.dbml
Normal file
53
schema/crm.dbml
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
Table professional_contacts {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
company text
|
||||||
|
title text
|
||||||
|
email text
|
||||||
|
phone text
|
||||||
|
linkedin_url text
|
||||||
|
how_we_met text
|
||||||
|
tags "text[]" [not null, default: `'{}'`]
|
||||||
|
notes text
|
||||||
|
last_contacted timestamptz
|
||||||
|
follow_up_date date
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
last_contacted
|
||||||
|
follow_up_date
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table contact_interactions {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
contact_id uuid [not null, ref: > professional_contacts.id]
|
||||||
|
interaction_type text [not null]
|
||||||
|
occurred_at timestamptz [not null, default: `now()`]
|
||||||
|
summary text [not null]
|
||||||
|
follow_up_needed boolean [not null, default: false]
|
||||||
|
follow_up_notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(contact_id, occurred_at)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table opportunities {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
contact_id uuid [ref: > professional_contacts.id]
|
||||||
|
title text [not null]
|
||||||
|
description text
|
||||||
|
stage text [not null, default: 'identified']
|
||||||
|
value "decimal(12,2)"
|
||||||
|
expected_close_date date
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
stage
|
||||||
|
}
|
||||||
|
}
|
||||||
25
schema/files.dbml
Normal file
25
schema/files.dbml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
Table stored_files {
|
||||||
|
id bigserial [pk]
|
||||||
|
guid uuid [unique, not null, default: `gen_random_uuid()`]
|
||||||
|
thought_id uuid [ref: > thoughts.guid]
|
||||||
|
project_id uuid [ref: > projects.guid]
|
||||||
|
name text [not null]
|
||||||
|
media_type text [not null]
|
||||||
|
kind text [not null, default: 'file']
|
||||||
|
encoding text [not null, default: 'base64']
|
||||||
|
size_bytes bigint [not null]
|
||||||
|
sha256 text [not null]
|
||||||
|
content bytea [not null]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
thought_id
|
||||||
|
project_id
|
||||||
|
sha256
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cross-file refs (for relspecgo merge)
|
||||||
|
Ref: stored_files.thought_id > thoughts.guid [delete: set null]
|
||||||
|
Ref: stored_files.project_id > projects.guid [delete: set null]
|
||||||
31
schema/household.dbml
Normal file
31
schema/household.dbml
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
Table household_items {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
category text
|
||||||
|
location text
|
||||||
|
details jsonb [not null, default: `'{}'`]
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
category
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table household_vendors {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
service_type text
|
||||||
|
phone text
|
||||||
|
email text
|
||||||
|
website text
|
||||||
|
notes text
|
||||||
|
rating int
|
||||||
|
last_used date
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
service_type
|
||||||
|
}
|
||||||
|
}
|
||||||
30
schema/maintenance.dbml
Normal file
30
schema/maintenance.dbml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
Table maintenance_tasks {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
category text
|
||||||
|
frequency_days int
|
||||||
|
last_completed timestamptz
|
||||||
|
next_due timestamptz
|
||||||
|
priority text [not null, default: 'medium']
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
next_due
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table maintenance_logs {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
task_id uuid [not null, ref: > maintenance_tasks.id]
|
||||||
|
completed_at timestamptz [not null, default: `now()`]
|
||||||
|
performed_by text
|
||||||
|
cost "decimal(10,2)"
|
||||||
|
notes text
|
||||||
|
next_action text
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(task_id, completed_at)
|
||||||
|
}
|
||||||
|
}
|
||||||
49
schema/meals.dbml
Normal file
49
schema/meals.dbml
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
Table recipes {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [not null]
|
||||||
|
cuisine text
|
||||||
|
prep_time_minutes int
|
||||||
|
cook_time_minutes int
|
||||||
|
servings int
|
||||||
|
ingredients jsonb [not null, default: `'[]'`]
|
||||||
|
instructions jsonb [not null, default: `'[]'`]
|
||||||
|
tags "text[]" [not null, default: `'{}'`]
|
||||||
|
rating int
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
cuisine
|
||||||
|
tags
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table meal_plans {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
week_start date [not null]
|
||||||
|
day_of_week text [not null]
|
||||||
|
meal_type text [not null]
|
||||||
|
recipe_id uuid [ref: > recipes.id]
|
||||||
|
custom_meal text
|
||||||
|
servings int
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
week_start
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table shopping_lists {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
week_start date [unique, not null]
|
||||||
|
items jsonb [not null, default: `'[]'`]
|
||||||
|
notes text
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
week_start
|
||||||
|
}
|
||||||
|
}
|
||||||
32
schema/meta.dbml
Normal file
32
schema/meta.dbml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
Table chat_histories {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
session_id text [not null]
|
||||||
|
title text
|
||||||
|
channel text
|
||||||
|
agent_id text
|
||||||
|
project_id uuid [ref: > projects.guid]
|
||||||
|
messages jsonb [not null, default: `'[]'`]
|
||||||
|
summary text
|
||||||
|
metadata jsonb [not null, default: `'{}'`]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
session_id
|
||||||
|
project_id
|
||||||
|
channel
|
||||||
|
agent_id
|
||||||
|
created_at
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table tool_annotations {
|
||||||
|
id bigserial [pk]
|
||||||
|
tool_name text [unique, not null]
|
||||||
|
notes text [not null, default: '']
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cross-file refs (for relspecgo merge)
|
||||||
|
Ref: chat_histories.project_id > projects.guid [delete: set null]
|
||||||
46
schema/skills.dbml
Normal file
46
schema/skills.dbml
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
Table agent_skills {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [unique, not null]
|
||||||
|
description text [not null, default: '']
|
||||||
|
content text [not null]
|
||||||
|
tags "text[]" [not null, default: `'{}'`]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
}
|
||||||
|
|
||||||
|
Table agent_guardrails {
|
||||||
|
id uuid [pk, default: `gen_random_uuid()`]
|
||||||
|
name text [unique, not null]
|
||||||
|
description text [not null, default: '']
|
||||||
|
content text [not null]
|
||||||
|
severity text [not null, default: 'medium']
|
||||||
|
tags "text[]" [not null, default: `'{}'`]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
updated_at timestamptz [not null, default: `now()`]
|
||||||
|
}
|
||||||
|
|
||||||
|
Table project_skills {
|
||||||
|
project_id uuid [not null, ref: > projects.guid]
|
||||||
|
skill_id uuid [not null, ref: > agent_skills.id]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(project_id, skill_id) [pk]
|
||||||
|
project_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table project_guardrails {
|
||||||
|
project_id uuid [not null, ref: > projects.guid]
|
||||||
|
guardrail_id uuid [not null, ref: > agent_guardrails.id]
|
||||||
|
created_at timestamptz [not null, default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
(project_id, guardrail_id) [pk]
|
||||||
|
project_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cross-file refs (for relspecgo merge)
|
||||||
|
Ref: project_skills.project_id > projects.guid [delete: cascade]
|
||||||
|
Ref: project_guardrails.project_id > projects.guid [delete: cascade]
|
||||||
Reference in New Issue
Block a user