# rag/modules/llm_handler.py
import sys
from pathlib import Path
from typing import List, Dict, Optional
import litellm
# Add project root to sys.path
sys.path.append(str(Path(__file__).parent.parent))
import config
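# NOTE (illustrative assumption, not part of the original file): this module
# expects config.py to expose at least LLM_MODEL_NAME and SYSTEM_PROMPT, where
# SYSTEM_PROMPT contains a "{context}" placeholder that is filled with the
# retrieved sources below. A minimal sketch of such a config, with a
# hypothetical model id, could look like:
#
#     LLM_MODEL_NAME = "gemini/gemini-1.5-flash"  # hypothetical default model
#     SYSTEM_PROMPT = (
#         "Answer the user's question using only the sources provided.\n"
#         "Context:\n{context}"
#     )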
class LLMHandler:
    def __init__(self, model_name: Optional[str] = None):
        # Fall back to the model configured in config.py when none is given.
        self.model_name = model_name if model_name else config.LLM_MODEL_NAME
    def generate_answer(self, query: str, context_chunks: List[Dict], api_key: str, temperature: float = 0.3) -> str:
        """
        Generates an answer using the LLM and the provided context.
        """
        if not context_chunks:
            # Ukrainian: "No relevant documents were found to answer the question."
            return "Не знайдено релевантних документів для відповіді."
        # Prepare context string
        context_text = ""
        for i, chunk in enumerate(context_chunks):
            meta = chunk['chunk']['metadata']
            cit = meta.get('citation_short', f"Doc {meta.get('article_number')}")
            title = meta.get('article_title', '')
            text = chunk['chunk']['text']
            context_text += f"Source [{i+1}]: {cit}"
            if title:
                context_text += f" ({title})"
            context_text += f"\nContent: {text}\n\n"
        # Construct messages
        messages = [
            {"role": "system", "content": config.SYSTEM_PROMPT.format(context=context_text)},
            {"role": "user", "content": query}
        ]
        try:
            response = litellm.completion(
                model=self.model_name,
                messages=messages,
                api_key=api_key,
                temperature=temperature,
                max_tokens=1024
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Error communicating with LLM: {str(e)}"