# Example configuration for amcs.
#
# Changelog (from the introducing commit):
#   * Update README with Ollama integration details
#   * Add Ollama configuration to example YAML files
#   * Implement Ollama provider in AI factory
#   * Add tests for Ollama provider functionality
#   * Enhance config validation for Ollama settings
---
# HTTP server settings.
server:
  host: "0.0.0.0"
  port: 8080
  read_timeout: "15s"
  write_timeout: "30s"
  idle_timeout: "60s"
  # CORS allow-list. "*" permits any origin — tighten for production.
  allowed_origins:
    - "*"
# MCP (Model Context Protocol) endpoint settings.
mcp:
  path: "/mcp"
  server_name: "amcs"
  version: "0.1.0"
  transport: "streamable_http"
# Client authentication.
auth:
  mode: "api_keys"
  header_name: "x-brain-key"
  query_param: "key"
  # Keep false in production: keys in URLs leak via logs and referrers.
  allow_query_param: false
  keys:
    - id: "local-client"
      value: "replace-me"  # placeholder — set a real secret before use
      description: "main local client key"
# PostgreSQL connection pool.
database:
  # NOTE(review): default credentials and sslmode=disable are for local
  # development only — override for any shared environment.
  url: "postgres://postgres:postgres@db:5432/amcs?sslmode=disable"
  max_conns: 10
  min_conns: 2
  max_conn_lifetime: "30m"
  max_conn_idle_time: "10m"
# AI provider configuration. `provider` selects which of the per-provider
# stanzas below (litellm / ollama / openrouter) is active.
ai:
  provider: "litellm"

  # Embedding generation defaults.
  embeddings:
    model: "openai/text-embedding-3-small"
    dimensions: 1536

  # Metadata-extraction model defaults.
  metadata:
    model: "gpt-4o-mini"
    temperature: 0.1

  # LiteLLM proxy provider.
  litellm:
    base_url: "http://host.containers.internal:4000/v1"
    api_key: "replace-me"  # placeholder — set a real key before use
    use_responses_api: false
    request_headers: {}
    embedding_model: "openrouter/openai/text-embedding-3-small"
    metadata_model: "gpt-4o-mini"

  # Ollama provider (local models via the OpenAI-compatible endpoint).
  ollama:
    base_url: "http://host.containers.internal:11434/v1"
    api_key: "ollama"  # Ollama ignores the key but clients require one
    request_headers: {}

  # OpenRouter provider.
  openrouter:
    base_url: "https://openrouter.ai/api/v1"
    api_key: ""
    app_name: "amcs"
    site_url: ""
    extra_headers: {}
# Capture defaults applied when incoming items omit metadata.
capture:
  source: "mcp"
  metadata_defaults:
    type: "observation"
    topic_fallback: "uncategorized"
# Search defaults and limits.
search:
  default_limit: 10
  default_threshold: 0.5
  max_limit: 50
# Logging output.
logging:
  level: "info"
  format: "json"
# Observability endpoints.
observability:
  metrics_enabled: true
  # pprof exposes profiling data — keep disabled unless actively debugging.
  pprof_enabled: false