feat(ai): add support for Ollama AI provider configuration
* Update README with Ollama integration details
* Add Ollama configuration to example YAML files
* Implement Ollama provider in AI factory
* Add tests for Ollama provider functionality
* Enhance config validation for Ollama settings
This commit is contained in:
@@ -69,3 +69,51 @@ logging:
|
||||
t.Fatalf("server port = %d, want 9090", cfg.Server.Port)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAppliesOllamaEnvOverrides(t *testing.T) {
|
||||
configPath := filepath.Join(t.TempDir(), "test.yaml")
|
||||
if err := os.WriteFile(configPath, []byte(`
|
||||
server:
|
||||
port: 8080
|
||||
mcp:
|
||||
path: "/mcp"
|
||||
auth:
|
||||
keys:
|
||||
- id: "test"
|
||||
value: "secret"
|
||||
database:
|
||||
url: "postgres://from-file"
|
||||
ai:
|
||||
provider: "ollama"
|
||||
embeddings:
|
||||
model: "nomic-embed-text"
|
||||
dimensions: 768
|
||||
metadata:
|
||||
model: "llama3.2"
|
||||
ollama:
|
||||
base_url: "http://localhost:11434/v1"
|
||||
api_key: "ollama"
|
||||
search:
|
||||
default_limit: 10
|
||||
max_limit: 50
|
||||
logging:
|
||||
level: "info"
|
||||
`), 0o600); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
|
||||
t.Setenv("OB1_OLLAMA_BASE_URL", "https://ollama.example.com/v1")
|
||||
t.Setenv("OB1_OLLAMA_API_KEY", "remote-key")
|
||||
|
||||
cfg, _, err := Load(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
|
||||
if cfg.AI.Ollama.BaseURL != "https://ollama.example.com/v1" {
|
||||
t.Fatalf("ollama base url = %q, want env override", cfg.AI.Ollama.BaseURL)
|
||||
}
|
||||
if cfg.AI.Ollama.APIKey != "remote-key" {
|
||||
t.Fatalf("ollama api key = %q, want env override", cfg.AI.Ollama.APIKey)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user