Use this file to discover all available pages before exploring further.
LangChain is a powerful framework for building large language model (LLM) applications. Through Laozhang API, you can use various mainstream AI models in LangChain, building complex AI application chains.
import os

from langchain_openai import ChatOpenAI

# Point the OpenAI-compatible client at the Laozhang API gateway.
# NOTE(review): prefer exporting these in the shell instead of setting
# them in code — see the "API Key Management" note in this document.
os.environ["OPENAI_API_BASE"] = "https://api.laozhang.ai/v1"
os.environ["OPENAI_API_KEY"] = "Your Laozhang API key"

# Initialize the chat model.
llm = ChatOpenAI(
    model="gpt-4-turbo",
    temperature=0.7,
    max_tokens=2000,
)

# Smoke-test the connection with a single call.
response = llm.invoke("Hello!")
print(response.content)
API Key Management: It’s recommended to use environment variables to manage API Keys, avoiding hardcoding keys in code. Visit the Laozhang API Console to obtain your API Key.
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

# Buffer that stores the raw transcript of every turn.
memory = ConversationBufferMemory()

# Chain that feeds the accumulated transcript back to the model on
# each call, giving the conversation multi-turn context.
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)

# Multi-turn conversation: the second turn relies on the first.
response1 = conversation.predict(input="My name is Zhang San")
response2 = conversation.predict(input="What's my name?")
print(response2)  # Will remember your name is Zhang San
# Use GPT-3.5 for simple tasks (lower cost)
cheap_llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    temperature=0.5,
)

# Use GPT-4 for complex tasks (higher quality)
expensive_llm = ChatOpenAI(
    model="gpt-4-turbo",
    temperature=0.7,
)

# Route each request to the cheapest model that can handle it.
# Simple classification task
classification = cheap_llm.invoke("Classify this text sentiment: This product is great!")

# Complex analysis task
analysis = expensive_llm.invoke("Please analyze this product review in depth: This product is great!")
Support streaming output for better user experience:
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Model configured to stream tokens to stdout as they are generated,
# instead of returning the full completion at once.
streaming_llm = ChatOpenAI(
    model="gpt-4-turbo",
    temperature=0.7,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)

# Tokens are printed incrementally by the callback handler.
response = streaming_llm.invoke("Please write a short story")
from langchain.tools import Tool


def search_database(query: str) -> str:
    """Search database"""
    # Your database search logic
    return f"Search result for: {query}"


# Wrap the function as a LangChain tool so an agent can invoke it.
search_tool = Tool(
    name="DatabaseSearch",
    func=search_database,
    description="Search information from database. Input should be search keywords.",
)
import langchain
from langchain.cache import InMemoryCache

# Enable process-wide in-memory caching of LLM responses:
# identical prompts are answered from the cache instead of the API.
langchain.llm_cache = InMemoryCache()

# First call (slower)
result1 = llm.invoke("What is AI?")
# Second call (use cache, faster)
result2 = llm.invoke("What is AI?")
# ❌ Bad example
prompt = "Write something"

# ✅ Good example
# NOTE(review): line breaks inside the template below were reconstructed
# from the collapsed source — confirm against the original document.
prompt = """You are a professional technical writer.

Task: Write an article about AI applications

Requirements:
- Word count: 1000 words
- Audience: Technical professionals
- Style: Professional yet approachable
- Structure: Introduction - Body - Conclusion
- Include: 3 practical cases

Please start writing:"""