llm_client.py

"""
Thin wrapper around OpenAI API for LLM calls and embeddings.
"""
import os
import json
import time
from typing import List, Optional
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
client = OpenAI()


def get_embedding(text: str, model: str = "text-embedding-3-small", verbose: bool = False) -> List[float]:
    """Get embedding for a single text string."""
    text = text.replace("\n", " ").strip()
    if not text:
        # Return a zero vector for empty input. 1536 is the output dimension
        # of text-embedding-3-small; other models may need a different size.
        return [0.0] * 1536
    if verbose:
        print(f"[Embedding API call: {len(text)} chars]", end=" ", flush=True)
    response = client.embeddings.create(input=[text], model=model)
    return response.data[0].embedding


def get_embeddings_batch(texts: List[str], model: str = "text-embedding-3-small") -> List[List[float]]:
    """Get embeddings for a batch of texts."""
    # Substitute the literal "empty" for blank strings so the batch call
    # never sends an empty input to the API.
    cleaned = [t.replace("\n", " ").strip() or "empty" for t in texts]
    response = client.embeddings.create(input=cleaned, model=model)
    return [d.embedding for d in response.data]
def llm_call(
prompt: str,
system: str = "You are a helpful assistant.",
model: str = "gpt-4o",
temperature: Optional[float] = None,
max_tokens: int = 2048,
json_mode: bool = False,
max_retries: int = 1,
) -> str:
"""Single LLM call."""
kwargs = {
"model": model,
"messages": [
{"role": "system", "content": system},
{"role": "user", "content": prompt},
],
}
_ = temperature # intentionally ignored for compatibility with existing callers
kwargs["max_completion_tokens"] = max_tokens
if json_mode:
kwargs["response_format"] = {"type": "json_object"}
for attempt in range(max_retries):
try:
response = client.chat.completions.create(**kwargs)
return response.choices[0].message.content
except Exception as e:
if attempt < max_retries - 1:
time.sleep(2 ** attempt)
else:
raise e


def llm_call_json(prompt: str, system: str = "You are a helpful assistant.", **kwargs) -> dict:
    """LLM call that returns parsed JSON."""
    raw = llm_call(prompt, system=system, json_mode=True, **kwargs)
    return json.loads(raw)
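

# A minimal usage sketch, not part of the original module: it assumes
# OPENAI_API_KEY is available (e.g. via .env) and that the account has access
# to the default models above.
if __name__ == "__main__":
    vec = get_embedding("hello world", verbose=True)
    print(f"\nembedding dimension: {len(vec)}")

    vecs = get_embeddings_batch(["first document", "second document"])
    print(f"batch of {len(vecs)} embeddings")

    answer = llm_call("In one sentence, what is an embedding?", max_tokens=64)
    print(answer)

    # json_mode requires the prompt to mention JSON explicitly.
    data = llm_call_json('Reply with JSON of the form {"ok": true}.')
    print(data)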