Overview
LemonData is fully compatible with the OpenAI SDK. Just change the base URL and you can access 300+ models.

Installation
Copy
pip install openai
Configuration
Copy
from openai import OpenAI

# Point the standard OpenAI client at the LemonData endpoint;
# everything else about the SDK works unchanged.
client = OpenAI(
    base_url="https://api.lemondata.cc/v1",
    api_key="sk-your-lemondata-key",
)
Chat Completions
Works exactly like the OpenAI API:
# A standard chat-completion request; the call signature is identical
# to the vanilla OpenAI SDK.
conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
response = client.chat.completions.create(
    model="gpt-4o",  # Or any LemonData model
    messages=conversation,
    temperature=0.7,
    max_tokens=1000,
)
print(response.choices[0].message.content)
Streaming
Copy
# Request a streamed completion and print each token fragment as it
# arrives, without a trailing newline between chunks.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="")
Function Calling / Tools
Copy
# Declare a single tool the model may call; the schema follows the
# standard OpenAI function-calling format.
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string"},
            },
            "required": ["location"],
        },
    },
}

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=[weather_tool],
)

# Check if model wants to call a function
tool_calls = response.choices[0].message.tool_calls
if tool_calls:
    first_call = tool_calls[0]
    print(f"Function: {first_call.function.name}")
    print(f"Arguments: {first_call.function.arguments}")
Vision
Copy
# Multimodal message: one text part plus one image-URL part in the
# same user turn.
image_question = [
    {"type": "text", "text": "What's in this image?"},
    {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
]
response = client.chat.completions.create(
    model="gpt-4o",  # Or claude-3-5-sonnet-20241022
    messages=[{"role": "user", "content": image_question}],
)
Image Generation
Copy
# Generate a single 1024x1024 image with DALL-E 3 and print the
# hosted URL of the result.
generated = client.images.generate(
    model="dall-e-3",
    prompt="A white siamese cat",
    size="1024x1024",
    quality="standard",
    n=1,
)
print(generated.data[0].url)
Embeddings
Copy
# Embed a single string and show the first few vector components.
embedding_response = client.embeddings.create(
    model="text-embedding-3-small",
    input="Hello world",
)
vector = embedding_response.data[0].embedding
print(vector[:5])  # First 5 dimensions
Audio - Text to Speech
Copy
# Synthesize speech with the "alloy" voice and save it to disk.
response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="Hello, welcome to LemonData!",
)
# NOTE(review): stream_to_file on the plain create() result is marked
# deprecated in openai-python v1 in favor of with_streaming_response —
# confirm against the installed SDK version.
response.stream_to_file("output.mp3")
Audio - Transcription
Copy
# Transcribe a local audio file with Whisper; the file must be opened
# in binary mode and is closed automatically by the context manager.
with open("audio.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        file=audio_file,
        model="whisper-1",
    )
print(transcript.text)
Using Different Models
The key advantage of LemonData is accessing multiple providers:
# The same call shape works for every provider; only the model id changes.
for model_id in (
    "gpt-4o",                      # OpenAI
    "claude-3-5-sonnet-20241022",  # Anthropic Claude
    "gemini-2.0-flash",            # Google Gemini
    "deepseek-r1",                 # DeepSeek
    "llama-3.3-70b",               # Meta Llama
):
    response = client.chat.completions.create(model=model_id, messages=messages)
Error Handling
Copy
from openai import APIError, APIStatusError, AuthenticationError, RateLimitError

# Handle the common failure modes of a chat request, from most specific
# exception type to least specific.
try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
except AuthenticationError:
    print("Invalid API key")
except RateLimitError:
    print("Rate limit exceeded, please wait")
except APIStatusError as e:
    # Any other non-2xx HTTP response; `status_code` is only defined on
    # status-bearing errors, not on the APIError base class.
    print(f"API error: {e.status_code} - {e.message}")
except APIError as e:
    # Connection-level failures (timeouts, DNS, etc.) carry no HTTP status,
    # so reading e.status_code here would itself raise AttributeError.
    print(f"API error: {e.message}")