19 changes: 12 additions & 7 deletions doc_comments_ai/app.py
@@ -58,6 +58,11 @@ def run():
         default="http://localhost:11434",
         help="Ollama base url",
     )
+    parser.add_argument(
+        "--max-tokens",
+        type=int,
+        help="Maximum number of tokens to generate. Defaults depend on the model",
+    )
 
     if sys.argv.__len__() < 2:
         sys.exit("Please provide a file")
@@ -74,17 +79,17 @@ def run():
 
     if args.azure_deployment:
         utils.is_azure_openai_environment_available()
-        llm_wrapper = llm.LLM(azure_deployment=args.azure_deployment)
+        llm_wrapper = llm.LLM(azure_deployment=args.azure_deployment, max_tokens=args.max_tokens)
     elif args.gpt4:
         utils.is_openai_api_key_available()
-        llm_wrapper = llm.LLM(model=GptModel.GPT_4)
+        llm_wrapper = llm.LLM(model=GptModel.GPT_4, max_tokens=args.max_tokens)
     elif args.gpt3_5_16k:
         utils.is_openai_api_key_available()
-        llm_wrapper = llm.LLM(model=GptModel.GPT_35_16K)
+        llm_wrapper = llm.LLM(model=GptModel.GPT_35_16K, max_tokens=args.max_tokens)
     elif args.ollama_model:
-        llm_wrapper = llm.LLM(ollama=(args.ollama_base_url, args.ollama_model))
+        llm_wrapper = llm.LLM(ollama=(args.ollama_base_url, args.ollama_model), max_tokens=args.max_tokens)
     else:
-        llm_wrapper = llm.LLM(local_model=args.local_model)
+        llm_wrapper = llm.LLM(local_model=args.local_model, max_tokens=args.max_tokens)
 
     generated_doc_comments = {}
 
@@ -102,13 +107,13 @@ def run():
 
     for node in treesitterNodes:
         method_name = utils.get_bold_text(node.name)
 
         if node.doc_comment:
             print(
                 f"⚠️ Method {method_name} already has a doc comment. Skipping..."
             )
             continue
 
         if args.guided:
             print(f"Generate doc for {utils.get_bold_text(method_name)}? (y/n)")
             if not input().lower() == "y":
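Reviewer note: a minimal, self-contained sketch of how the new flag behaves at the argparse layer (the token value and the empty argument list below are hypothetical, not part of this PR). The key point is that omitting `--max-tokens` leaves `args.max_tokens` as `None`, which is what lets `LLM.__init__` fall back to its per-model default.

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--max-tokens",
    type=int,
    help="Maximum number of tokens to generate. Defaults depend on the model",
)

# An explicit value is forwarded unchanged to llm.LLM(..., max_tokens=...).
args = parser.parse_args(["--max-tokens", "1024"])
assert args.max_tokens == 1024

# Omitting the flag yields None, which triggers the per-model default in LLM.__init__.
args = parser.parse_args([])
assert args.max_tokens is None
```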
38 changes: 31 additions & 7 deletions doc_comments_ai/llm.py
@@ -18,40 +18,64 @@ class GptModel(Enum):
     GPT_4 = "gpt-4"
 
 
 class LLM:
     def __init__(
         self,
         model: GptModel = GptModel.GPT_35,
         local_model: "str | None" = None,
         azure_deployment: "str | None" = None,
         ollama: "tuple[str,str] | None" = None,
+        max_tokens: int | None = None,
     ):
-        max_tokens = 2048 if model == GptModel.GPT_35 else 4096
+        model_ctx_map = {
+            GptModel.GPT_35: 4096,
+            GptModel.GPT_35_16K: 16384,
+            GptModel.GPT_4: 8192,
+        }
+
+        n_ctx = model_ctx_map.get(model, 8192)
+        default_max_tokens = 2048 if model == GptModel.GPT_35 else 8192
+
+        if max_tokens is None:
+            max_tokens = default_max_tokens
+
+        if max_tokens > n_ctx:
+            print(
+                f"⚠️ Warning: max_tokens={max_tokens} is larger than allowed n_ctx={n_ctx}. "
+                f"Using n_ctx instead."
+            )
+            max_tokens = n_ctx
+
+        self.max_tokens = max_tokens
+        self.n_ctx = n_ctx
+
         if local_model is not None:
             self.install_llama_cpp()
 
             self.llm = LlamaCpp(
                 model_path=local_model,
                 temperature=0.8,
-                max_tokens=max_tokens,
+                max_tokens=self.max_tokens,
                 verbose=False,
+                n_ctx=self.n_ctx,
             )
         elif azure_deployment is not None:
             self.llm = ChatLiteLLM(
                 temperature=0.8,
-                max_tokens=max_tokens,
+                max_tokens=self.max_tokens,
                 model=f"azure/{azure_deployment}",
+                n_ctx=self.n_ctx,
             )
         elif ollama is not None:
             self.llm = Ollama(
                 base_url=ollama[0],
                 model=ollama[1],
                 temperature=0.8,
-                num_ctx=max_tokens,
+                num_ctx=self.max_tokens,  # Ollama uses num_ctx
             )
         else:
             self.llm = ChatLiteLLM(
-                temperature=0.8, max_tokens=max_tokens, model=model.value
+                temperature=0.8, max_tokens=self.max_tokens, model=model.value
             )
         self.template = (
             "Add a detailed doc comment to the following {language} method:\n{code}\n"
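Reviewer note: the fallback-then-clamp logic above, restated as a self-contained sketch. The dict mirrors `model_ctx_map` from the diff; the string values for the GPT-3.5 enum members are assumed from the usual OpenAI model names and are not shown in this diff. One caveat worth keeping in mind: in the langchain `Ollama` wrapper, `num_ctx` sets the context-window size rather than a generation cap, so mapping `max_tokens` onto it (as the diff does, per the inline comment) is an approximation.

```python
from enum import Enum


class GptModel(Enum):
    GPT_35 = "gpt-3.5-turbo"          # assumed value, not shown in this diff
    GPT_35_16K = "gpt-3.5-turbo-16k"  # assumed value, not shown in this diff
    GPT_4 = "gpt-4"


# Same context-window sizes as model_ctx_map in the diff above.
MODEL_CTX_MAP = {GptModel.GPT_35: 4096, GptModel.GPT_35_16K: 16384, GptModel.GPT_4: 8192}


def resolve_max_tokens(model: GptModel, requested: "int | None") -> int:
    """Sketch of the fallback-then-clamp rule from LLM.__init__."""
    n_ctx = MODEL_CTX_MAP.get(model, 8192)
    default = 2048 if model == GptModel.GPT_35 else 8192
    max_tokens = default if requested is None else requested
    return min(max_tokens, n_ctx)  # never request more than the context window


assert resolve_max_tokens(GptModel.GPT_4, None) == 8192       # per-model default
assert resolve_max_tokens(GptModel.GPT_35, 9000) == 4096      # clamped to n_ctx (warns in the real code)
assert resolve_max_tokens(GptModel.GPT_35_16K, None) == 8192  # default, well under the 16k window
```

Clamping instead of raising keeps existing invocations working with an oversized `--max-tokens` while still surfacing the warning.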