Files
amcs/configs/config.example.yaml
Hein 14e218d784
Some checks failed
CI / build-and-test (push) Failing after -32m22s
test(config): add migration tests for litellm provider
* Implement tests for migrating configuration from v1 to v2 for the litellm provider.
* Validate the structure and values of the migrated configuration.
* Ensure migration rejects newer versions of the configuration.
fix(validate): enhance AI provider validation logic
* Consolidate provider validation into a dedicated method.
* Ensure at least one provider is specified and validate its type.
* Check for required fields based on provider type.
fix(mcpserver): update tool set to use new enrichment tool
* Replace RetryMetadataTool with RetryEnrichmentTool in the ToolSet.
fix(tools): refactor tools to use embedding and metadata runners
* Update tools to utilize EmbeddingRunner and MetadataRunner instead of Provider.
* Adjust method calls to align with the new runner interfaces.
2026-04-21 21:14:28 +02:00

128 lines
2.5 KiB
YAML

---
# Example configuration (schema version 2) for amcs.
# Copy to config.yaml and replace every "replace-me" placeholder.
#
# NOTE(review): this file's indentation was reconstructed from a flattened
# copy — verify the nesting against the config loader, in particular whether
# `mcp` and `auth` are top-level sections or nest under `server`.

version: 2

# HTTP server settings.
server:
  host: "0.0.0.0"
  port: 8080
  read_timeout: "10m"
  write_timeout: "10m"
  idle_timeout: "60s"
  allowed_origins:
    - "*"

# MCP endpoint settings.
mcp:
  path: "/mcp"
  sse_path: "/sse"
  server_name: "amcs"
  transport: "streamable_http"
  session_timeout: "10m"

# Client authentication: static API keys (header or query param) plus
# optional OAuth client credentials.
auth:
  header_name: "x-brain-key"
  query_param: "key"
  allow_query_param: false
  keys:
    - id: "local-client"
      value: "replace-me"
      description: "main local client key"
  oauth:
    clients:
      - id: "oauth-client"
        client_id: ""
        client_secret: ""
        description: "optional OAuth client credentials"

# PostgreSQL connection pool.
database:
  url: "postgres://postgres:postgres@localhost:5432/amcs?sslmode=disable"
  max_conns: 10
  min_conns: 2
  max_conn_lifetime: "30m"
  max_conn_idle_time: "10m"

# AI providers and the models used for embeddings and metadata extraction.
ai:
  # Named provider connections; referenced below via `provider: "<name>"`.
  providers:
    default:
      type: "litellm"
      base_url: "http://localhost:4000/v1"
      api_key: "replace-me"
      request_headers: {}
    ollama_local:
      type: "ollama"
      base_url: "http://localhost:11434/v1"
      api_key: "ollama"
      request_headers: {}
    openrouter:
      type: "openrouter"
      base_url: "https://openrouter.ai/api/v1"
      api_key: "replace-me"
      app_name: "amcs"
      site_url: ""
      request_headers: {}
  # Embedding model, with ordered fallbacks tried on failure.
  embeddings:
    dimensions: 1536
    primary:
      provider: "default"
      model: "openai/text-embedding-3-small"
    fallbacks:
      - provider: "ollama_local"
        model: "nomic-embed-text"
  # Metadata-extraction model, with ordered fallbacks tried on failure.
  metadata:
    temperature: 0.1
    log_conversations: false
    timeout: "10s"
    primary:
      provider: "default"
      model: "gpt-4o-mini"
    fallbacks:
      - provider: "openrouter"
        model: "openai/gpt-4.1-mini"
  # Optional overrides for background jobs (backfill_embeddings,
  # retry_failed_metadata, reparse_thought_metadata).
  background:
    embeddings:
      primary:
        provider: "default"
        model: "openai/text-embedding-3-small"
    metadata:
      primary:
        provider: "default"
        model: "gpt-4o-mini"

# Defaults applied to captured items.
capture:
  source: "mcp"
  metadata_defaults:
    type: "observation"
    topic_fallback: "uncategorized"

# Search result limits and similarity threshold.
search:
  default_limit: 10
  default_threshold: 0.5
  max_limit: 50

logging:
  level: "info"
  format: "json"

observability:
  metrics_enabled: true
  pprof_enabled: false

# Background job: backfill missing embeddings.
backfill:
  enabled: false
  run_on_startup: false
  interval: "15m"
  batch_size: 20
  max_per_run: 100
  include_archived: false

# Background job: retry failed metadata extraction.
metadata_retry:
  enabled: false
  run_on_startup: false
  interval: "24h"
  max_per_run: 100
  include_archived: false