import os
from typing import List, Dict, Any, Optional

import requests
def GrokApi(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    reasoning_effort: str = "default",
    response_format: Optional[Dict[str, Any]] = None,
    temperature: float = 0.3,
    max_completion_tokens: int = 2000,
    include_reasoning: bool = False
) -> str:
""" |
|
Make a request to the Grok API and return the response content, supporting tool usage and agentic features. |
|
|
|
Args: |
|
system_prompt (str): The system prompt to set the context. |
|
user_input (str): The user input to process. |
|
tools (List[Dict[str, Any]], optional): List of tool definitions for tool-calling. |
|
tool_choice (str, optional): Controls tool usage ("none", "auto", "required"). Defaults to "auto". |
|
reasoning_effort (str, optional): Reasoning mode for Qwen3 models ("none", "default"). Defaults to "default". |
|
response_format (Dict[str, Any], optional): Format for structured outputs (e.g., JSON schema). |
|
temperature (float, optional): Sampling temperature (0 to 2). Defaults to 0.7 for determinism. |
|
max_completion_tokens (int, optional): Max tokens in response. Defaults to 1000. |
|
|
|
Returns: |
|
str: The content of the assistant's response or tool call results, or empty string on error. |
|
""" |
|
|
|
|
|
    # Read the API key from the environment; fail fast if it is missing.
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        print("Groq API error: GROQ_API_KEY environment variable not set")
        return ""

    api_url = "https://api.groq.com/openai/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input}
    ]

    # Base request payload; temperature is clamped to the API's valid range of 0 to 2.
    payload = {
        "model": "qwen/qwen3-32b",
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "include_reasoning": include_reasoning,
        "max_completion_tokens": max_completion_tokens
    }

    # Attach tool definitions and validate tool_choice.
    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice
        else:
            print(f"Groq API warning: Invalid tool_choice '{tool_choice}', defaulting to 'auto'")
            payload["tool_choice"] = "auto"

    # Validate the reasoning mode before sending it.
    if reasoning_effort in ["none", "default"]:
        payload["reasoning_effort"] = reasoning_effort
    else:
        print(f"Groq API warning: Invalid reasoning_effort '{reasoning_effort}', defaulting to 'default'")
        payload["reasoning_effort"] = "default"

    # Optional structured-output format (e.g., JSON mode or a JSON schema).
    if response_format:
        payload["response_format"] = response_format

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        result = response.json()
        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return a readable summary of each call
        # instead of the (usually empty) text content.
        if "tool_calls" in message:
            tool_calls = message["tool_calls"]
            tool_results = []
            for tool_call in tool_calls:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        # content can be null in the JSON response; coerce it to an empty string.
        content = message.get("content") or ""
        return content.strip()

    except requests.exceptions.HTTPError as e:
        print(f"Groq API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"Groq API error: Network error - {e}")
        return ""
    except (KeyError, ValueError) as e:
        print(f"Groq API error: Unexpected response format - {e}")
        return ""
    except Exception as e:
        print(f"Groq API error: Unexpected error - {e}")
        return ""
def open_oss_get_llm_response(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    temperature: float = 0.1,
    max_completion_tokens: int = 3000
) -> str:
""" |
|
Make a request to the Grok API and return the response content, supporting tool usage and agentic features. |
|
|
|
Args: |
|
system_prompt (str): The system prompt to set the context. |
|
user_input (str): The user input to process. |
|
tools (List[Dict[str, Any]], optional): List of tool definitions for tool-calling. |
|
tool_choice (str, optional): Controls tool usage ("none", "auto", "required"). Defaults to "auto". |
|
temperature (float, optional): Sampling temperature (0 to 2). Defaults to 0.7 for determinism. |
|
max_completion_tokens (int, optional): Max tokens in response. Defaults to 1000. |
|
|
|
Returns: |
|
str: The content of the assistant's response or tool call results, or empty string on error. |
|
""" |
|
|
|
    # Read the API key from the environment; fail fast if it is missing.
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        print("Groq API error: GROQ_API_KEY environment variable not set")
        return ""

    api_url = "https://api.groq.com/openai/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input}
    ]

    # Base request payload; temperature is clamped to the API's valid range of 0 to 2.
    payload = {
        "model": "openai/gpt-oss-20b",
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "max_completion_tokens": max_completion_tokens,
        "reasoning_effort": "medium"
    }

    # Attach tool definitions and validate tool_choice.
    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice
        else:
            print(f"Groq API warning: Invalid tool_choice '{tool_choice}', defaulting to 'auto'")
            payload["tool_choice"] = "auto"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        result = response.json()
        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return a readable summary of each call
        # instead of the (usually empty) text content.
        if "tool_calls" in message:
            tool_calls = message["tool_calls"]
            tool_results = []
            for tool_call in tool_calls:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        # content can be null in the JSON response; coerce it to an empty string.
        content = message.get("content") or ""
        return content.strip()

    except requests.exceptions.HTTPError as e:
        print(f"Groq API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"Groq API error: Network error - {e}")
        return ""
    except (KeyError, ValueError) as e:
        print(f"Groq API error: Unexpected response format - {e}")
        return ""
    except Exception as e:
        print(f"Groq API error: Unexpected error - {e}")
        return ""