Using Official OpenAI SDKs
LaoZhang API is fully compatible with the OpenAI API format. You can use the official OpenAI SDKs directly with minimal configuration changes.

Supported Official SDKs

LaoZhang API supports all official OpenAI SDKs:

- Python (openai)
- Node.js (openai)
- .NET (OpenAI)
- Go (go-openai)
- Java (third-party)
- PHP (third-party)
- Ruby (third-party)
Python SDK
Installation
pip install openai
Basic Configuration
from openai import OpenAI

# Configure the LaoZhang API service
client = OpenAI(
    api_key="YOUR_API_KEY",                # Your LaoZhang API key
    base_url="https://api.laozhang.ai/v1"  # LaoZhang API endpoint
)

# Usage is identical to the official OpenAI client
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(response.choices[0].message.content)
Environment Variable Configuration
import os
from openai import OpenAI

# Set environment variables
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
os.environ["OPENAI_BASE_URL"] = "https://api.laozhang.ai/v1"

# Use the default configuration
client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Explain quantum computing"}]
)
Async Usage
import asyncio
from openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI(
        api_key="YOUR_API_KEY",
        base_url="https://api.laozhang.ai/v1"
    )
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())
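Because the async client returns awaitables, you can also issue several requests concurrently. A minimal sketch using asyncio.gather (the prompts are illustrative):

import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://api.laozhang.ai/v1"
)

async def ask(prompt: str) -> str:
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content

async def main():
    # Issue both requests at once instead of awaiting them one by one
    answers = await asyncio.gather(
        ask("Summarize HTTP in one sentence"),
        ask("Summarize TCP in one sentence")
    )
    for answer in answers:
        print(answer)

asyncio.run(main())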
Streaming Output
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://api.laozhang.ai/v1"
)

stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write a short story"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
Node.js SDK
Installation
npm install openai
Basic Configuration
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: 'YOUR_API_KEY',
  baseURL: 'https://api.laozhang.ai/v1'
});

const response = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hello!' }]
});

console.log(response.choices[0].message.content);
Environment Variable Configuration
import OpenAI from 'openai';

// Set environment variables
process.env.OPENAI_API_KEY = 'YOUR_API_KEY';
process.env.OPENAI_BASE_URL = 'https://api.laozhang.ai/v1';

// Use the default configuration
const openai = new OpenAI();

const response = await openai.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Explain AI to a 5-year-old' }]
});
Streaming Output
const stream = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Tell me a joke' }],
  stream: true
});

for await (const chunk of stream) {
  if (chunk.choices[0]?.delta?.content) {
    process.stdout.write(chunk.choices[0].delta.content);
  }
}
TypeScript Support
import OpenAI from 'openai';
import type { ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
  baseURL: 'https://api.laozhang.ai/v1'
});

const params: ChatCompletionCreateParamsNonStreaming = {
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hello TypeScript!' }],
  temperature: 0.7
};

const response = await openai.chat.completions.create(params);
.NET SDK
Installation
dotnet add package OpenAI
Basic Configuration
using System.ClientModel;
using OpenAI;
using OpenAI.Chat;

var client = new OpenAIClient(new ApiKeyCredential("YOUR_API_KEY"), new OpenAIClientOptions
{
    Endpoint = new Uri("https://api.laozhang.ai/v1")
});

var chatClient = client.GetChatClient("gpt-3.5-turbo");
var response = await chatClient.CompleteChatAsync("Hello!");

Console.WriteLine(response.Value.Content[0].Text);
Streaming Output
await foreach (var update in chatClient.CompleteChatStreamingAsync("Tell me a story"))
{
    if (update.ContentUpdate.Count > 0)
    {
        Console.Write(update.ContentUpdate[0].Text);
    }
}
Go SDK
Installation
go get github.com/sashabaranov/go-openai
Basic Configuration
package main

import (
    "context"
    "fmt"

    "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultConfig("YOUR_API_KEY")
    config.BaseURL = "https://api.laozhang.ai/v1"
    client := openai.NewClientWithConfig(config)

    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("Error: %v\n", err)
        return
    }

    fmt.Println(resp.Choices[0].Message.Content)
}
Model Switching
Using Different Models
# GPT models
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}]
)

# Claude models
response = client.chat.completions.create(
    model="claude-3-opus-20240229",
    messages=[{"role": "user", "content": "Hello"}]
)

# Gemini models
response = client.chat.completions.create(
    model="gemini-pro",
    messages=[{"role": "user", "content": "Hello"}]
)
Dynamic Model Selection
def chat_with_model(message: str, model: str = "gpt-3.5-turbo"):
    """Chat function with dynamic model switching"""
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content

# Use different models
print(chat_with_model("Explain quantum computing", "gpt-4"))
print(chat_with_model("Explain quantum computing", "claude-3-opus-20240229"))
print(chat_with_model("Explain quantum computing", "gemini-pro"))
Advanced Features
Function Calling
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather information for a specified city",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What's the weather in Beijing?"}],
    tools=tools,
    tool_choice="auto"
)

if response.choices[0].message.tool_calls:
    print("AI wants to call function:", response.choices[0].message.tool_calls[0].function.name)
Image Input
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://example.com/image.jpg"
                    }
                }
            ]
        }
    ]
)
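For local files, the same image_url field accepts a base64-encoded data URL. A minimal sketch (the file path is illustrative):

import base64

# Encode a local file as a data URL
with open("photo.jpg", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}
            ]
        }
    ]
)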
Embeddings
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="Text content to embed"
)

embedding = response.data[0].embedding
print(f"Vector dimension: {len(embedding)}")
Error Handling
Basic Error Handling
from openai import OpenAI, OpenAIError

try:
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}]
    )
except OpenAIError as e:
    print(f"API error: {e}")
Detailed Error Handling
from openai import (
    OpenAI,
    APIError,
    APIConnectionError,
    RateLimitError,
    InternalServerError
)

try:
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}]
    )
except RateLimitError:
    print("Rate limit exceeded, please retry later")
except APIConnectionError:
    print("Network connection error")
except InternalServerError:
    print("Internal server error")
except APIError as e:
    print(f"API error: {e}")
Best Practices
1. Configuration Management
import os
from openai import OpenAI

class LaoZhangClient:
    def __init__(self):
        self.client = OpenAI(
            api_key=os.getenv("LAOZHANG_API_KEY"),
            base_url=os.getenv("LAOZHANG_BASE_URL", "https://api.laozhang.ai/v1")
        )

    def chat(self, message: str, model: str = "gpt-3.5-turbo"):
        return self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": message}]
        )
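Usage, assuming LAOZHANG_API_KEY is set in the environment:

client = LaoZhangClient()
response = client.chat("Hello!")
print(response.choices[0].message.content)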
2. Retry Mechanism
import time
import random
from openai import OpenAI, RateLimitError

def chat_with_retry(client, messages, max_retries=3):
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages
            )
        except RateLimitError:
            if attempt < max_retries - 1:
                # Exponential backoff with jitter: 1-2s, then 2-3s, then 4-5s, ...
                wait_time = (2 ** attempt) + random.uniform(0, 1)
                time.sleep(wait_time)
            else:
                raise
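Note that the official Python SDK also retries certain failures automatically; a manual loop like the one above is mainly useful when you want custom backoff behavior. The built-in equivalent is the max_retries client option:

client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://api.laozhang.ai/v1",
    max_retries=3  # SDK retries connection errors, 429s, and 5xx responses
)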
3. Cost Control
def controlled_chat(message: str, max_tokens: int = 150):
    """Control output length for cost management"""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": message}],
        max_tokens=max_tokens,
        temperature=0.7
    )
    return response
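To see what a call actually consumed, read the usage object returned with every non-streaming completion:

response = controlled_chat("Summarize the plot of Hamlet")
usage = response.usage
print(f"Prompt tokens: {usage.prompt_tokens}")
print(f"Completion tokens: {usage.completion_tokens}")
print(f"Total tokens: {usage.total_tokens}")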
Migration Guide
Migrating from OpenAI
If you're already using official OpenAI services, migrating to LaoZhang API is straightforward:

1. Change base_url
# Original configuration
client = OpenAI(api_key="sk-...")

# Change to LaoZhang API
client = OpenAI(
    api_key="YOUR_LAOZHANG_KEY",
    base_url="https://api.laozhang.ai/v1"
)
2. Update Environment Variables
# Original
export OPENAI_API_KEY="sk-..."
# Change to
export OPENAI_API_KEY="YOUR_LAOZHANG_KEY"
export OPENAI_BASE_URL="https://api.laozhang.ai/v1"
3. No Code Changes Required

All other code remains unchanged, including:
- Method calls
- Parameter formats
- Response handling
Multi-Provider Compatibility
class MultiProviderClient:
    def __init__(self):
        self.laozhang_client = OpenAI(
            api_key="LAOZHANG_KEY",
            base_url="https://api.laozhang.ai/v1"
        )
        self.openai_client = OpenAI(api_key="OPENAI_KEY")

    def chat(self, message: str, provider: str = "laozhang"):
        client = self.laozhang_client if provider == "laozhang" else self.openai_client
        return client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": message}]
        )
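Usage, routing the same prompt to either provider:

client = MultiProviderClient()
print(client.chat("Hello!", provider="laozhang").choices[0].message.content)
print(client.chat("Hello!", provider="openai").choices[0].message.content)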