2626from pathlib import Path
2727from typing import Any
2828
29- from packages.arc42gen.models.config import LLMConfig
30- from packages.arc42gen.providers.factory import create_llm_provider
29+ from packages.cli.agents.llm_client import client_from_settings
3130from packages.cli.graph.neo4j_client import NeoClient
3231from packages.config.settings import Settings
3332
8382_COUNT_EDGES = "MATCH ()-[r:IMPLEMENTS_DOMAIN]->() RETURN count(r) AS c"
8483
8584
86- # ---------------------------------------------------------------------------
87- # LLM config builder (mirrors summarizer pattern)
88- # ---------------------------------------------------------------------------
89-
90- def _build_llm_config(settings: Settings) -> LLMConfig:
91-     provider = settings.LLM_PROVIDER.lower()
92-     if provider == "ollama":
93-         return LLMConfig(
94-             provider="ollama",
95-             model=settings.LLM_MODEL_OLLAMA,
96-             api_key="",
97-             base_url=settings.OLLAMA_BASE_URL,
98-             max_tokens=2000,
99-         )
100-     elif provider == "anthropic":
101-         return LLMConfig(
102-             provider="anthropic",
103-             model=LLMConfig.DEFAULT_MODELS["anthropic"],
104-             api_key=settings.ANTHROPIC_API_KEY,
105-             max_tokens=2000,
106-         )
107-     elif provider == "gemini":
108-         return LLMConfig(
109-             provider="gemini",
110-             model=LLMConfig.DEFAULT_MODELS["gemini"],
111-             api_key=settings.GEMINI_API_KEY,
112-             max_tokens=2000,
113-         )
114-     else:
115-         raise ValueError(
116-             f"Unsupported LLM_PROVIDER '{provider}'. "
117-             "Set LLM_PROVIDER to ollama, anthropic, or gemini in .env"
118-         )
119-
120-
12185# ---------------------------------------------------------------------------
12286# Prompt builder
12387# ---------------------------------------------------------------------------
@@ -258,8 +222,7 @@ def run_domain_extractor(
258222     progress_cb("fetch", f"Loaded {len(nodes)} nodes.")
259223
260224     # 2. Call LLM in batches; merge domains across responses
261-     llm_config = _build_llm_config(settings)
262-     llm = create_llm_provider(llm_config)
225+     llm = client_from_settings(settings)
263226
264227     merged: dict[str, dict] = {}
265228     batches = [nodes[i : i + batch_size] for i in range(0, len(nodes), batch_size)]
@@ -269,8 +232,8 @@ def run_domain_extractor(
269232         progress_cb("llm", f"LLM call {i}/{len(batches)} ({len(batch)} nodes)...")
270233         prompt = _build_batch_prompt(batch)
271234         try:
272-             response = llm.generate(prompt=prompt, temperature=0.1)
273-             parsed = _extract_json(response.content)
235+             raw_text = llm.complete(prompt, max_tokens=2000, temperature=0.1)
236+             parsed = _extract_json(raw_text)
274237             domains = parsed.get("domains", [])
275238             _merge_domains(merged, domains)
276239         except Exception as exc:
0 commit comments