diff --git a/.gitignore b/.gitignore index 7dbdfee..5084528 100644 --- a/.gitignore +++ b/.gitignore @@ -135,6 +135,16 @@ venv/ ENV/ env.bak/ venv.bak/ +pyenv/ +*.env + +# VS Code +.vscode/ +.history/ +*.code-workspace + +# Specstory +.specstory/ # Spyder project settings .spyderproject diff --git a/README.md b/README.md index 1038d38..94169cc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Grok3 API -Grok3 is cool, smart, and useful, but there is no official API available. This is an **unofficial Python client** for interacting with the Grok 3 API. It leverages cookies from browser requests to authenticate and access the API endpoints. +Grok3 is cool, smart, and useful, but there is no official API available. This is an **unofficial Python client** for interacting with the Grok 3 API. It leverages cookies from browser requests to authenticate and access the API endpoints. The API also provides OpenAI-compatible endpoints for easy integration with existing applications. --- @@ -49,9 +49,11 @@ Example cookie string from a curl command: ### 4. Use the Client +#### 4.1 Direct Client Usage + Pass the extracted cookie values to the GrokClient and send a message: -``` +```python from grok_client import GrokClient # Your cookie values @@ -71,6 +73,64 @@ response = client.send_message("write a poem") print(response) ``` +#### 4.2 OpenAI-Compatible API Server + +The package includes an OpenAI-compatible API server that allows you to use Grok with any OpenAI-compatible client library or application. + +##### Start the Server + +1. Create a `.env` file in the project directory using the provided `.env.example` template: +```bash +cp grok_client/.env.example .env +``` + +2. Update the `.env` file with your Grok cookie values: +```env +GROK_SSO=your_sso_cookie +GROK_SSO_RW=your_sso_rw_cookie +# Optional configurations +API_HOST=127.0.0.1 +API_PORT=8000 +MODEL_NAME=grok-3 +``` + +3. Start the API server: +```bash +uvicorn grok_client.server:app --reload --host 0.0.0.0 --port 8000 +``` + +##### Use with OpenAI Python Client + +```python +from openai import OpenAI + +# Initialize client pointing to local server +client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="dummy-key" # Any non-empty string will work +) + +# Create a chat completion +response = client.chat.completions.create( + model="grok-3", # Model name can be configured in .env + messages=[ + {"role": "user", "content": "Hello, how can you help me?"} + ] +) + +print(response.choices[0].message.content) +``` + +##### Interactive Chat Script + +The package includes an interactive chat script that uses the OpenAI-compatible endpoint: + +```bash +python grok_client/interactive.py +``` + +This provides a command-line interface for chatting with Grok using the OpenAI-compatible API. + ### 5. Optional: Add Memory with Mem0 If you want Grok to remember conversations, you can integrate it with Mem0. Mem0 provides a memory layer for AI applications. 
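Note that `server.py` (below) authenticates each request by reading the Grok `sso`/`sso-rw` cookies from the incoming `Cookie` header — the same mechanism `interactive.py` uses via `default_headers` — so a client that omits them receives a 401. A minimal streaming sketch against the local server, with placeholder cookie values:

```python
from openai import OpenAI

# Placeholder cookie values -- substitute your real Grok sso / sso-rw cookies
client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="dummy-key",  # Any non-empty string; the local server does not validate it
    default_headers={"Cookie": "sso=your_sso_cookie; sso-rw=your_sso_rw_cookie"},
)

# stream=True exercises the server's SSE path (GrokAPI.stream_chat)
stream = client.chat.completions.create(
    model="grok-3",
    messages=[{"role": "user", "content": "Summarize what you can do."}],
    stream=True,
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()
```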
diff --git a/grok_client/.env.example b/grok_client/.env.example new file mode 100644 index 0000000..9dd9afb --- /dev/null +++ b/grok_client/.env.example @@ -0,0 +1,14 @@ +# Grok API Configuration + +# API Server Configuration +API_HOST=127.0.0.1 +API_PORT=8000 + +# Grok Model Configuration +MODEL_NAME=grok-3 + +# Authentication Cookies +# Replace these with your actual Grok cookies +# You can obtain these from your browser after logging into Grok +GROK_SSO=your_sso_cookie_value_here +GROK_SSO_RW=your_sso_rw_cookie_value_here \ No newline at end of file diff --git a/grok_client/client.py b/grok_client/client.py index 1f2510f..03dc648 100644 --- a/grok_client/client.py +++ b/grok_client/client.py @@ -1,5 +1,12 @@ import requests import json +import time +import logging +import re + +# Set up logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) class GrokClient: def __init__(self, cookies): @@ -8,14 +15,24 @@ def __init__(self, cookies): Args: cookies (dict): Dictionary containing cookie values - - x-anonuserid - - x-challenge - - x-signature - sso - sso-rw """ self.base_url = "https://grok.com/rest/app-chat/conversations/new" - self.cookies = cookies + + # Convert cookie string to dict if needed + if isinstance(cookies.get('Cookie'), str): + cookie_dict = {} + for cookie in cookies.get('Cookie', '').split(';'): + if cookie.strip(): + name, value = cookie.strip().split('=', 1) + cookie_dict[name.strip()] = value.strip() + self.cookies = cookie_dict + else: + self.cookies = cookies + + logger.debug(f"Using cookies: {self.cookies}") + self.headers = { "accept": "*/*", "accept-language": "en-GB,en;q=0.9", @@ -25,28 +42,29 @@ def __init__(self, cookies): "referer": "https://grok.com/", "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126", "Brave";v="126"', "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"macOS"', + "sec-ch-ua-platform": '"Windows"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "sec-gpc": "1", - "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" } + logger.debug(f"Initialized GrokClient with headers: {self.headers}") def _prepare_payload(self, message): """Prepare the default payload with the user's message""" - return { + payload = { "temporary": False, "modelName": "grok-3", "message": message, "fileAttachments": [], "imageAttachments": [], "disableSearch": False, - "enableImageGeneration": True, + "enableImageGeneration": False, "returnImageBytes": False, "returnRawGrokInXaiRequest": False, - "enableImageStreaming": True, - "imageGenerationCount": 2, + "enableImageStreaming": False, + "imageGenerationCount": 0, "forceConcise": False, "toolOverrides": {}, "enableSideBySide": True, @@ -56,6 +74,31 @@ def _prepare_payload(self, message): "deepsearchPreset": "", "isReasoning": False } + logger.debug(f"Prepared payload: {payload}") + return payload + + def _clean_json_response(self, response): + """Clean up JSON response by removing markdown and code blocks""" + # Remove markdown code blocks + response = re.sub(r'```json\s*', '', response) + response = re.sub(r'```\s*$', '', response) + + try: + # Try to parse as JSON + json_data = json.loads(response) + + # If the response has a nested response or function_call, extract it + if isinstance(json_data, dict): + if "response" in json_data: + 
json_data = json_data["response"] + elif "function_call" in json_data: + json_data = json_data["function_call"]["arguments"] + if isinstance(json_data, str): + json_data = json.loads(json_data) + + return json.dumps(json_data, indent=2) + except json.JSONDecodeError: + return response def send_message(self, message): """ @@ -67,33 +110,88 @@ def send_message(self, message): Returns: str: The complete response from Grok """ - payload = self._prepare_payload(message) - response = requests.post( - self.base_url, - headers=self.headers, - cookies=self.cookies, - json=payload, - stream=True - ) - - full_response = "" - - for line in response.iter_lines(): - if line: - decoded_line = line.decode('utf-8') - try: - json_data = json.loads(decoded_line) - result = json_data.get("result", {}) - response_data = result.get("response", {}) + try: + logger.debug(f"Sending message to Grok: {message}") + payload = self._prepare_payload(message) + + logger.debug(f"Making POST request to {self.base_url}") + logger.debug(f"Using cookies: {self.cookies}") + + session = requests.Session() + for cookie_name, cookie_value in self.cookies.items(): + session.cookies.set(cookie_name, cookie_value) + + response = session.post( + self.base_url, + headers=self.headers, + json=payload, + stream=True + ) + + logger.debug(f"Response status code: {response.status_code}") + response.raise_for_status() # Raise an exception for bad status codes + + full_response = "" + last_response = None - if "modelResponse" in response_data: - return response_data["modelResponse"]["message"] + logger.debug("Processing response stream...") + for line in response.iter_lines(): + if line: + try: + decoded_line = line.decode('utf-8') + logger.debug(f"Received line: {decoded_line}") + + json_data = json.loads(decoded_line) + logger.debug(f"Parsed JSON: {json_data}") + + # Check for error in response + if "error" in json_data: + error_msg = json_data["error"] + logger.error(f"Error in response: {error_msg}") + raise Exception(error_msg) + + result = json_data.get("result", {}) + response_data = result.get("response", {}) + logger.debug(f"Response data: {response_data}") - token = response_data.get("token", "") - if token: - full_response += token + # Check for complete response + if "modelResponse" in response_data: + complete_response = response_data["modelResponse"].get("message", "") + if complete_response: + logger.debug(f"Got complete response: {complete_response}") + return self._clean_json_response(complete_response) + + # Collect streaming tokens + token = response_data.get("token", "") + if token: + full_response += token + last_response = full_response # Keep track of last valid response + logger.debug(f"Current response: {full_response}") + + except json.JSONDecodeError as e: + logger.warning(f"Failed to decode JSON: {e}") + continue + except Exception as e: + logger.error(f"Error processing response line: {e}") + if str(e): # If we have an error message + raise Exception(f"Error in response: {str(e)}") + continue - except json.JSONDecodeError: - continue + # Return the last valid response if we have one + if last_response: + logger.debug(f"Returning last valid response: {last_response.strip()}") + return self._clean_json_response(last_response.strip()) + + # If we got here without a response, raise an exception + logger.error("No valid response received from Grok API") + raise Exception("No valid response received from Grok API") + + except requests.exceptions.RequestException as e: + logger.error(f"Request failed: {e}") + raise 
Exception(f"Request failed: {str(e)}") + except Exception as e: + logger.error(f"Failed to process response: {e}") + raise Exception(f"Failed to process response: {str(e)}") - return full_response.strip() \ No newline at end of file + logger.warning("Returning empty response as fallback") + return "" # Fallback empty response \ No newline at end of file diff --git a/grok_client/interactive.py b/grok_client/interactive.py new file mode 100644 index 0000000..f58be7e --- /dev/null +++ b/grok_client/interactive.py @@ -0,0 +1,103 @@ +import os +import sys +import logging +from dotenv import load_dotenv +from openai import OpenAI + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def setup_client(): + """Set up and return the OpenAI client with Grok API configuration""" + # Load environment variables + load_dotenv() + + # Get configuration from environment + api_host = os.getenv('API_HOST', '127.0.0.1') + api_port = os.getenv('API_PORT', '8000') + model_name = os.getenv('MODEL_NAME', 'grok-3') + grok_sso = os.getenv('GROK_SSO') + grok_sso_rw = os.getenv('GROK_SSO_RW') + + if not all([grok_sso, grok_sso_rw]): + logger.error("Missing required environment variables. Please check your .env file.") + logger.info("Required variables: GROK_SSO, GROK_SSO_RW") + logger.info("Optional variables: API_HOST, API_PORT, MODEL_NAME") + sys.exit(1) + + # Initialize OpenAI client with local endpoint + client = OpenAI( + base_url=f"http://{api_host}:{api_port}/v1", + api_key="dummy-key", # Not used but required + default_headers={ + "Cookie": f"sso={grok_sso}; sso-rw={grok_sso_rw}" + } + ) + + return client, model_name + +def interactive_chat(): + """Run an interactive chat session with Grok""" + client, model_name = setup_client() + + print("\n===== Grok Interactive Chat =====") + print("Type 'exit', 'quit', or Ctrl+C to end the conversation.") + print("Type 'clear' to start a new conversation.") + print("==============================\n") + + # Initialize conversation history + conversation = [] + + try: + while True: + # Get user input + user_input = input("\nYou: ") + + # Check for exit commands + if user_input.lower() in ['exit', 'quit']: + print("\nExiting chat. Goodbye!") + break + + # Check for clear command + if user_input.lower() == 'clear': + conversation = [] + print("\nConversation history cleared.") + continue + + # Add user message to conversation + conversation.append({"role": "user", "content": user_input}) + + try: + # Send request to Grok API + print("\nGrok: ", end="", flush=True) + + # Use streaming for a more interactive experience + stream = client.chat.completions.create( + model=model_name, + messages=conversation, + stream=True + ) + + # Collect the full response while streaming + full_response = "" + for chunk in stream: + if chunk.choices[0].delta.content: + content = chunk.choices[0].delta.content + print(content, end="", flush=True) + full_response += content + + print() # Add a newline after the response + + # Add assistant response to conversation history + conversation.append({"role": "assistant", "content": full_response}) + + except Exception as e: + logger.error(f"Error: {str(e)}") + print(f"\nAn error occurred: {str(e)}") + + except KeyboardInterrupt: + print("\n\nExiting chat. 
Goodbye!") + +if __name__ == "__main__": + interactive_chat() \ No newline at end of file diff --git a/grok_client/interactive_chat.py b/grok_client/interactive_chat.py new file mode 100644 index 0000000..eecec16 --- /dev/null +++ b/grok_client/interactive_chat.py @@ -0,0 +1,188 @@ +import os +import sys +import logging +import argparse +from dotenv import load_dotenv +from .grok_openai_client import GrokOpenAIClient + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def parse_arguments(): + """ + Parse command line arguments for the interactive chat application. + + Returns: + argparse.Namespace: The parsed command line arguments. + """ + parser = argparse.ArgumentParser(description='Interactive chat with Grok API using OpenAI-compatible interface') + parser.add_argument('--host', help='API host (default: from .env or 127.0.0.1)') + parser.add_argument('--port', help='API port (default: from .env or 8000)') + parser.add_argument('--model', help='Model name (default: from .env or grok-3)') + parser.add_argument('--sso', help='SSO token (default: from .env)') + parser.add_argument('--sso-rw', help='SSO-RW token (default: from .env)') + parser.add_argument('--json', action='store_true', help='Request responses in JSON format') + parser.add_argument('--system', help='Custom system message') + parser.add_argument('--temperature', type=float, default=1.0, help='Temperature for response generation (default: 1.0)') + + return parser.parse_args() + +def setup_client(args): + """ + Set up the Grok OpenAI client using command line arguments or environment variables. + + Args: + args (argparse.Namespace): The parsed command line arguments. + + Returns: + GrokOpenAIClient: The initialized client. + """ + try: + # Initialize client with args or environment variables + client = GrokOpenAIClient( + api_host=args.host, + api_port=args.port, + model_name=args.model, + sso_token=args.sso, + sso_rw_token=args.sso_rw, + load_from_env=True # Always try to load from env first + ) + + return client + except ValueError as e: + logger.error(f"Error initializing client: {e}") + logger.info("Make sure you have the required environment variables set in .env file or provided as arguments.") + logger.info("Required variables: GROK_SSO, GROK_SSO_RW") + logger.info("Optional variables: API_HOST, API_PORT, MODEL_NAME") + sys.exit(1) + +def interactive_chat(): + """ + Run an interactive chat session with Grok using the OpenAI-compatible interface. + """ + # Parse command line arguments + args = parse_arguments() + + # Set up client + client = setup_client(args) + model_name = client.model_name + + # Set up system message + system_message = args.system + if args.json and not system_message: + system_message = "You are a helpful assistant that always responds in valid JSON format." + elif not system_message: + system_message = "You are a helpful assistant." + + print(f"\n===== Grok Interactive Chat ({model_name}) =====") + print("Type 'exit', 'quit', or Ctrl+C to end the conversation.") + print("Type 'clear' to start a new conversation.") + print("Type '/help' to see available commands.") + print("==============================\n") + + # Initialize conversation history + conversation = [] + if system_message: + conversation.append({"role": "system", "content": system_message}) + + try: + while True: + # Get user input + user_input = input("\nYou: ") + + # Check for exit commands + if user_input.lower() in ['exit', 'quit']: + print("\nExiting chat. 
Goodbye!") + break + + # Check for clear command + if user_input.lower() == 'clear': + conversation = [] + if system_message: + conversation.append({"role": "system", "content": system_message}) + print("\nConversation history cleared.") + continue + + # Check for help command + if user_input.lower() == '/help': + print("\nAvailable commands:") + print(" exit, quit - Exit the chat") + print(" clear - Clear conversation history") + print(" /help - Show this help message") + print(" /json - Toggle JSON response format") + print(" /temp - Set temperature (0.0-2.0)") + print(" /system - Set system message") + continue + + # Check for JSON toggle command + if user_input.lower() == '/json': + args.json = not args.json + print(f"\nJSON response format: {'enabled' if args.json else 'disabled'}") + continue + + # Check for temperature command + if user_input.lower().startswith('/temp '): + try: + new_temp = float(user_input.split(' ', 1)[1]) + if 0.0 <= new_temp <= 2.0: + args.temperature = new_temp + print(f"\nTemperature set to: {args.temperature}") + else: + print("\nTemperature must be between 0.0 and 2.0") + except (ValueError, IndexError): + print("\nInvalid temperature value. Format: /temp 0.7") + continue + + # Check for system message command + if user_input.lower().startswith('/system '): + system_message = user_input.split(' ', 1)[1] + # Update the system message in the conversation + conversation = [msg for msg in conversation if msg["role"] != "system"] + conversation.insert(0, {"role": "system", "content": system_message}) + print(f"\nSystem message updated.") + continue + + # Add user message to conversation + conversation.append({"role": "user", "content": user_input}) + + try: + # Send request to Grok API + print("\nGrok: ", end="", flush=True) + + # Prepare request parameters + params = { + "messages": conversation, + "stream": True, + "temperature": args.temperature + } + + # Add JSON format if requested + if args.json: + params["response_format"] = {"type": "json_object"} + + # Use streaming for a more interactive experience + stream = client.chat_completion(**params) + + # Process the streaming response + full_response = client.process_streaming_response(stream) + + # Add assistant response to conversation history + conversation.append({"role": "assistant", "content": full_response}) + + except Exception as e: + logger.error(f"Error: {str(e)}") + print(f"\nAn error occurred: {str(e)}") + + except KeyboardInterrupt: + print("\n\nExiting chat. 
Goodbye!") + +def main(): + # Load environment variables + load_dotenv() + + # Run interactive chat + interactive_chat() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/grok_client/server.py b/grok_client/server.py new file mode 100644 index 0000000..097174b --- /dev/null +++ b/grok_client/server.py @@ -0,0 +1,264 @@ +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import StreamingResponse, JSONResponse +from typing import List, Optional, Dict, Any, Union +from pydantic import BaseModel, Field +from .client import GrokClient +import json +import time +import logging + +# Set up logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +app = FastAPI() + +# Enable CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +class ChatMessage(BaseModel): + role: str + content: str + function_call: Optional[Dict[str, Any]] = None + +class FunctionCall(BaseModel): + name: str + arguments: str + +class Function(BaseModel): + name: str + description: str + parameters: Dict[str, Any] + +class ChatCompletionRequest(BaseModel): + model: str + messages: List[ChatMessage] + stream: Optional[bool] = False + temperature: Optional[float] = 1.0 + max_tokens: Optional[int] = None + functions: Optional[List[Function]] = None + function_call: Optional[Union[str, Dict[str, str]]] = None + response_format: Optional[Dict[str, str]] = None + +class ChatCompletionChoice(BaseModel): + index: int = 0 + message: ChatMessage + finish_reason: str = "stop" + +class ChatCompletionResponse(BaseModel): + id: str + object: str = "chat.completion" + created: int + model: str + choices: List[ChatCompletionChoice] + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + function_call: Optional[Dict[str, Any]] = None + +class ChatCompletionChunk(BaseModel): + id: str + object: str = "chat.completion.chunk" + created: int + model: str + choices: List[Dict[str, Any]] + +class GrokAPI: + def __init__(self, cookies: Dict[str, str]): + self.client = GrokClient(cookies) + + def _prepare_system_message(self, request: ChatCompletionRequest) -> str: + # Default to simple responses unless specifically asked for structured output + system_content = "You are a helpful assistant. Provide direct, simple answers to questions." + + # Add function calling instructions if needed + if request.functions: + system_content = "You are a helpful assistant that provides structured data." + system_content += f" Available functions: {[f.name for f in request.functions]}" + system_content += f" Function schemas: {json.dumps([f.dict() for f in request.functions])}" + + # Add JSON format instructions if needed + elif request.response_format and request.response_format.get("type") == "json_object": + system_content = "You are a helpful assistant that always responds in valid JSON format." 
+ + return system_content + + def stream_chat(self, request: ChatCompletionRequest): + try: + # Prepare the conversation context + system_msg = self._prepare_system_message(request) + conversation = f"system: {system_msg}\n" + "\n".join([f"{msg.role}: {msg.content}" for msg in request.messages]) + + logger.debug(f"Sending conversation to Grok: {conversation}") + + # Get streaming response from Grok + response_stream = self.client.send_message(conversation) + logger.debug(f"Got response stream from Grok: {response_stream}") + + # Stream the response in OpenAI format + for token in response_stream.split(): + chunk = ChatCompletionChunk( + id="chatcmpl-" + str(int(time.time())), + created=int(time.time()), + model="grok-3", + choices=[{ + "index": 0, + "delta": {"content": token + " "}, + "finish_reason": None + }] + ) + yield f"data: {json.dumps(chunk.dict())}\n\n" + + # Send the final chunk + final_chunk = ChatCompletionChunk( + id="chatcmpl-final", + created=int(time.time()), + model="grok-3", + choices=[{ + "index": 0, + "delta": {}, + "finish_reason": "stop" + }] + ) + yield f"data: {json.dumps(final_chunk.dict())}\n\n" + yield "data: [DONE]\n\n" + except Exception as e: + logger.error(f"Error in stream_chat: {str(e)}") + yield f"data: {json.dumps({'error': str(e)})}\n\n" + yield "data: [DONE]\n\n" + +@app.get("/v1/models") +async def list_models(): + return { + "data": [ + { + "id": "grok-3", + "object": "model", + "created": int(time.time()), + "owned_by": "xai", + "permission": [], + "root": "grok-3", + "parent": None + } + ] + } + +@app.post("/v1/chat/completions") +async def create_chat_completion(raw_request: Request): + try: + # Get request body + body = await raw_request.json() + logger.debug(f"Received request body: {body}") + + # Parse request into ChatCompletionRequest + request = ChatCompletionRequest(**body) + + # Get cookies from request headers + headers = dict(raw_request.headers) + logger.debug(f"Received headers: {headers}") + + cookies = {'Cookie': headers.get('cookie', '')} if headers.get('cookie') else {} + logger.debug(f"Extracted cookies: {cookies}") + + if not cookies: + raise HTTPException(status_code=401, detail="No authentication cookies provided") + + # Initialize Grok API with cookies + grok = GrokAPI(cookies) + + if request.stream: + return StreamingResponse( + grok.stream_chat(request), + media_type="text/event-stream" + ) + + # For non-streaming response + system_msg = grok._prepare_system_message(request) + conversation = f"system: {system_msg}\n" + "\n".join([f"{msg.role}: {msg.content}" for msg in request.messages]) + logger.debug(f"Sending conversation to Grok: {conversation}") + + response = grok.client.send_message(conversation) + logger.debug(f"Received response from Grok: {response}") + + if not response: + logger.error("Empty response from Grok API") + raise HTTPException(status_code=500, detail="Empty response from Grok API") + + # Handle function calling + if request.functions and request.function_call: + try: + # Try to parse the response as JSON + parsed_response = json.loads(response) + + # Get the function name from the request + function_name = request.function_call.get("name", request.functions[0].name) if isinstance(request.function_call, dict) else request.functions[0].name + + message = ChatMessage( + role="assistant", + content="", + function_call={ + "name": function_name, + "arguments": json.dumps(parsed_response) + } + ) + except json.JSONDecodeError: + # If response is not valid JSON, wrap it in a basic structure + function_name = 
request.function_call.get("name", request.functions[0].name) if isinstance(request.function_call, dict) else request.functions[0].name
+                message = ChatMessage(
+                    role="assistant",
+                    content="",
+                    function_call={
+                        "name": function_name,
+                        "arguments": json.dumps({"result": response})
+                    }
+                )
+        else:
+            # Regular response or JSON format
+            if request.response_format and request.response_format.get("type") == "json_object":
+                try:
+                    # Ensure the response is valid JSON
+                    json.loads(response)
+                    message = ChatMessage(
+                        role="assistant",
+                        content=response
+                    )
+                except json.JSONDecodeError:
+                    # If not valid JSON, wrap it in a JSON structure
+                    message = ChatMessage(
+                        role="assistant",
+                        content=json.dumps({"response": response})
+                    )
+            else:
+                message = ChatMessage(
+                    role="assistant",
+                    content=response
+                )
+
+        # Create response object
+        chat_response = ChatCompletionResponse(
+            id=f"chatcmpl-{str(int(time.time()))}",
+            created=int(time.time()),
+            model=request.model,
+            choices=[ChatCompletionChoice(
+                message=message,
+                finish_reason="stop"
+            )]
+        )
+
+        logger.debug(f"Sending response: {chat_response.dict()}")
+        return chat_response
+
+    except Exception as e:
+        logger.error(f"Error in create_chat_completion: {str(e)}")
+        return JSONResponse(
+            status_code=500,
+            content={"error": str(e), "detail": "Failed to process request"}
+        )
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..6b11265
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,9 @@
+requests>=2.31.0
+fastapi==0.109.2
+uvicorn==0.27.1
+python-dotenv>=1.0.0
+openai>=1.0.0
+# Environment setup (run in a shell; these are not pip-installable requirements):
+#   python -m venv venv
+#   .\venv\Scripts\activate
+#   pip install -r requirements.txt
\ No newline at end of file
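Since `server.py` also accepts the legacy `functions`/`function_call` request fields, here is a hedged sketch of that path through the OpenAI client. The `get_weather` schema is invented purely for illustration; per the handler above, the server forwards the schema to Grok and wraps the reply in a `function_call` payload whose `arguments` field is a JSON string:

```python
from openai import OpenAI

# Placeholder cookie values -- substitute your real Grok sso / sso-rw cookies
client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="dummy-key",
    default_headers={"Cookie": "sso=your_sso_cookie; sso-rw=your_sso_rw_cookie"},
)

# Hypothetical function schema, for illustration only
weather_fn = {
    "name": "get_weather",
    "description": "Get the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

response = client.chat.completions.create(
    model="grok-3",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    functions=[weather_fn],
    function_call={"name": "get_weather"},
)

# The server returns the model's answer JSON-encoded in function_call.arguments
print(response.choices[0].message.function_call.arguments)
```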