"""Smoke-test script for the LLM services produced by LLMFactory."""
import os

from dotenv import load_dotenv

from llm_factory import LLMFactory
# Load environment variables (provider API keys etc.) from a local .env file
# before any service is constructed below.
load_dotenv()
def _test_service(display_name, provider_key, prompt, short_name=None, **factory_kwargs):
    """Create one LLM service via LLMFactory and run a single prompt through it.

    Args:
        display_name: Name shown in the section header and in the error
            message (e.g. "Grok (xAI)").
        provider_key: Key passed to ``LLMFactory.create_service``.
        prompt: The prompt sent to the service.
        short_name: Name used in the Prompt/Response/failure lines when it
            differs from ``display_name`` (e.g. "Grok"). Defaults to
            ``display_name``.
        **factory_kwargs: Extra keyword arguments forwarded to the factory
            (e.g. ``model="llama3.1"`` for Ollama).

    Any exception is caught and reported so that one broken provider does
    not abort the remaining tests.
    """
    short_name = display_name if short_name is None else short_name
    try:
        print(f"\n--- Testing {display_name} ---")
        service = LLMFactory.create_service(provider_key, **factory_kwargs)
        if service:
            response = service.generate_text(prompt)
            print(f"{short_name} Prompt: {prompt}")
            print(f"{short_name} Response: {response}")
        else:
            print(f"Failed to create {short_name} service.")
    except Exception as e:
        # Broad catch is deliberate: this is a best-effort smoke test and
        # each provider should be reported independently.
        print(f"Error testing {display_name}: {e}")


def main():
    """Smoke-test every LLM provider supported by LLMFactory.

    Each provider is exercised independently: the service is created via
    the factory, a short prompt is sent, and the response (or the error)
    is printed. Output is identical to the previous per-provider blocks.
    """
    print("Testing LLM Services...")

    _test_service(
        "OpenAI",
        "openai",
        "Explain the importance of structured JSON output from LLMs in one sentence.",
    )
    _test_service(
        "Groq",
        "groq",
        "Explain the concept of 'inference speed' for LLMs in one sentence.",
    )
    _test_service(
        "Claude",
        "claude",
        "What is Anthropic's Constitutional AI concept in one sentence?",
    )
    # Header/error lines say "Grok (xAI)" while the other lines say just "Grok".
    _test_service(
        "Grok (xAI)",
        "grok",
        "What is the mission of xAI in one sentence?",
        short_name="Grok",
    )
    # Make sure you have an Ollama model running, e.g., `ollama run llama3.1`
    _test_service("Ollama", "ollama", "What is Ollama?", model="llama3.1")
# Run the smoke tests only when executed as a script, not on import.
if __name__ == "__main__":
    main()