# NOTE(review): removed non-Python viewer artifacts (file size, blob hash,
# gutter line numbers) that had been pasted above the module and would
# otherwise be a SyntaxError.
import os
from dataclasses import dataclass
from typing import Optional
from pathlib import Path
from dotenv import load_dotenv

# Load variables from a local .env file into os.environ BEFORE the Settings
# dataclass below is defined — its field defaults call os.getenv() at class
# creation time, so this must run first.
load_dotenv()
@dataclass
class Settings:
    """Application-wide configuration settings.

    All defaults are read from environment variables (populated from .env by
    ``load_dotenv()`` at import time). Note that the defaults are evaluated
    once, when this class is created — changing os.environ afterwards does
    not affect already-constructed instances' defaults.
    """

    # LLM Provider settings — "auto" defers the choice to effective_llm_provider.
    llm_provider: str = os.getenv("LLM_PROVIDER", "auto")

    # Hugging Face settings
    hf_token: str = os.getenv("HF_TOKEN", "")
    hf_chat_model: str = os.getenv("HF_CHAT_MODEL", "Qwen/Qwen2.5-7B-Instruct")
    hf_temperature: float = 0.001  # near-deterministic sampling
    hf_max_new_tokens: int = 512

    # Model settings
    model_name: str = os.getenv("MODEL_NAME", "Qwen/Qwen2.5-7B-Instruct")

    # OpenAI settings.
    # BUGFIX: llm_endpoint / llm_api_key below reference these attributes,
    # but they were never defined — any non-HuggingFace provider path raised
    # AttributeError. Defined here with env-backed defaults.
    openai_api_key: str = os.getenv("OPENAI_API_KEY", "")
    openai_endpoint: str = os.getenv(
        "OPENAI_ENDPOINT", "https://api.openai.com/v1"
    )

    # Audio provider settings — "auto" defers to effective_audio_provider.
    audio_provider: str = os.getenv("AUDIO_PROVIDER", "auto")
    tts_model: str = os.getenv("TTS_MODEL", "canopylabs/orpheus-3b-0.1-ft")
    stt_model: str = os.getenv("STT_MODEL", "openai/whisper-large-v3")

    # Screen sharing settings
    screen_capture_interval: float = float(os.getenv("SCREEN_CAPTURE_INTERVAL", "1.0"))
    screen_compression_quality: int = int(os.getenv("SCREEN_COMPRESSION_QUALITY", "50"))
    max_width: int = int(os.getenv("SCREEN_MAX_WIDTH", "3440"))
    max_height: int = int(os.getenv("SCREEN_MAX_HEIGHT", "1440"))

    # Nebius settings (uppercase names kept as-is: external code may access
    # settings.NEBIUS_API_KEY directly; renaming would break callers).
    NEBIUS_MODEL: str = os.getenv("NEBIUS_MODEL", "google/gemma-3-27b-it")
    # NOTE(review): "Not found" looks like a sentinel some caller compares
    # against — kept byte-identical; confirm before changing to "".
    NEBIUS_API_KEY: str = os.getenv("NEBIUS_API_KEY", "Not found")
    NEBIUS_BASE_URL: str = os.getenv("NEBIUS_BASE_URL", "https://api.studio.nebius.com/v1/")

    # Hyper-V settings
    hyperv_enabled: bool = os.getenv("HYPERV_ENABLED", "false").lower() == "true"
    hyperv_host: str = os.getenv("HYPERV_HOST", "localhost")
    hyperv_username: Optional[str] = os.getenv("HYPERV_USERNAME")
    hyperv_password: Optional[str] = os.getenv("HYPERV_PASSWORD")

    # Application settings
    max_conversation_history: int = int(os.getenv("MAX_CONVERSATION_HISTORY", "50"))
    temp_dir: str = os.getenv("TEMP_DIR", "./temp")
    log_level: str = os.getenv("LOG_LEVEL", "INFO")

    def __post_init__(self) -> None:
        """Ensure the directories the application writes to exist."""
        Path(self.temp_dir).mkdir(exist_ok=True, parents=True)
        Path("./config").mkdir(exist_ok=True, parents=True)
        Path("./logs").mkdir(exist_ok=True, parents=True)

    def is_hf_token_valid(self) -> bool:
        """Return True if hf_token looks like a real token (non-empty, > 10 chars)."""
        return bool(self.hf_token and len(self.hf_token) > 10)

    @property
    def effective_llm_provider(self) -> str:
        """Resolve "auto" to "huggingface" (valid token) or "openai"; else pass through."""
        if self.llm_provider == "auto":
            return "huggingface" if self.is_hf_token_valid() else "openai"
        return self.llm_provider

    @property
    def effective_audio_provider(self) -> str:
        """Resolve "auto" to "huggingface" (valid token) or "openai"; else pass through."""
        if self.audio_provider == "auto":
            return "huggingface" if self.is_hf_token_valid() else "openai"
        return self.audio_provider

    @property
    def llm_endpoint(self) -> str:
        """Chat-completion endpoint URL for the effective LLM provider."""
        if self.effective_llm_provider == "huggingface":
            return f"https://api-inference.huggingface.co/models/{self.hf_chat_model}"
        return self.openai_endpoint

    @property
    def llm_api_key(self) -> str:
        """API key matching the effective LLM provider."""
        return self.hf_token if self.effective_llm_provider == "huggingface" else self.openai_api_key

    @property
    def effective_model_name(self) -> str:
        """Model identifier matching the effective LLM provider."""
        return self.hf_chat_model if self.effective_llm_provider == "huggingface" else self.model_name
# NOTE(review): removed stray trailing "|" gutter artifact (not valid Python).