In complex LLM applications, different inputs often require different processing paths. Dynamic routing lets the application decide at runtime which chain should handle each input:
```python
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from typing import Dict, List, Optional
import asyncio

class RouteDecision(BaseModel):
    route: str = Field(description="The selected processing route")
    confidence: float = Field(description="Confidence score of the decision")
    reasoning: str = Field(description="Explanation for the routing decision")

class IntelligentRouter:
    def __init__(self, routes: List[str]):
        self.routes = routes
        # LLM used for the routing decision (the model choice here is illustrative)
        self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
        self.parser = PydanticOutputParser(pydantic_object=RouteDecision)
        self.route_prompt = ChatPromptTemplate.from_template(
            """Analyze the following input and decide the best processing route.
            Available routes: {routes}
            Input: {input}
            {format_instructions}
            """
        )

    async def decide_route(self, input_text: str) -> RouteDecision:
        # Run the routing chain with every template variable, then parse the structured decision.
        chain = LLMChain(llm=self.llm, prompt=self.route_prompt)
        result = await chain.arun(
            routes=self.routes,
            input=input_text,
            format_instructions=self.parser.get_format_instructions()
        )
        return self.parser.parse(result)
```
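Used on its own, the router returns a structured decision. A minimal usage sketch, assuming an OpenAI API key is configured in the environment and using an illustrative sample input:

```python
async def demo_routing():
    router = IntelligentRouter(["translation", "summarization", "sentiment_analysis"])
    decision = await router.decide_route("Summarize this quarterly report in three sentences.")
    print(decision.route, decision.confidence, decision.reasoning)

asyncio.run(demo_routing())
```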
```python
class MultiLangProcessor:
    def __init__(self):
        self.router = IntelligentRouter([
            "translation",
            "summarization",
            "sentiment_analysis",
            "content_moderation"
        ])
        # Task-specific chains; these classes are assumed to be defined elsewhere.
        self.processors = {
            "translation": TranslationChain(),
            "summarization": SummaryChain(),
            "sentiment_analysis": SentimentChain(),
            "content_moderation": ModerationChain()
        }

    async def process(self, content: str) -> Dict:
        try:
            route = await self.router.decide_route(content)
            if route.confidence < 0.8:
                return await self.handle_low_confidence(content, route)

            processor = self.processors[route.route]
            result = await processor.run(content)
            return {
                "status": "success",
                "route": route.route,
                "result": result
            }
        except Exception as e:
            return await self.handle_error(e, content)
```
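The process method calls handle_low_confidence and handle_error, which are not shown. A possible sketch of the low-confidence path is below (the exact policy is an assumption; handle_error could similarly delegate to the ErrorHandler introduced next):

```python
    # Hypothetical method for MultiLangProcessor, not from the original article.
    async def handle_low_confidence(self, content: str, route: RouteDecision) -> Dict:
        # Surface the router's uncertainty instead of silently committing to a path.
        return {
            "status": "needs_review",
            "route": route.route,
            "confidence": route.confidence,
            "reasoning": route.reasoning,
            "result": None
        }
```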
```python
class ErrorHandler:
    def __init__(self):
        self.fallback_llm = ChatOpenAI(
            model_name="gpt-3.5-turbo",
            temperature=0.3
        )
        self.retry_limit = 3
        self.backoff_factor = 1.5
        # Maps exception type names to specialised handler coroutines.
        self.error_strategies = {}

    async def handle_error(
        self,
        error: Exception,
        context: Dict
    ) -> Dict:
        error_type = type(error).__name__
        if error_type in self.error_strategies:
            return await self.error_strategies[error_type](
                error, context
            )
        return await self.default_error_handler(error, context)

    async def retry_with_backoff(
        self,
        func,
        *args,
        **kwargs
    ):
        for attempt in range(self.retry_limit):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                if attempt == self.retry_limit - 1:
                    raise e
                await asyncio.sleep(
                    self.backoff_factor ** attempt
                )
```
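default_error_handler and the entries of error_strategies are not shown in the article. A minimal sketch of the default handler is given below; it is an assumption, not the author's implementation:

```python
    # Hypothetical method for ErrorHandler, not from the original article.
    async def default_error_handler(self, error: Exception, context: Dict) -> Dict:
        # Return a structured failure instead of re-raising to the caller.
        return {"status": "error", "error": str(error), "context": context}
```

Specific strategies can then be registered per exception type, e.g. handler.error_strategies["TimeoutError"] = some_timeout_handler, and flaky coroutines wrapped with retry_with_backoff.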
```python
class ModelFallbackChain:
    def __init__(self):
        self.models = [
            ChatOpenAI(model_name="gpt-4"),
            ChatOpenAI(model_name="gpt-3.5-turbo"),
            ChatOpenAI(model_name="gpt-3.5-turbo-16k")
        ]

    async def run_with_fallback(
        self,
        prompt: str
    ) -> Optional[str]:
        for model in self.models:
            try:
                return await self.try_model(model, prompt)
            except Exception:
                continue
        return await self.final_fallback(prompt)
```
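try_model and final_fallback are referenced but not defined. One possible sketch, assuming the legacy apredict helper on LangChain chat models, is:

```python
    # Hypothetical methods for ModelFallbackChain, not from the original article.
    async def try_model(self, model: ChatOpenAI, prompt: str) -> str:
        # Send the prompt to this model and return its text reply.
        return await model.apredict(prompt)

    async def final_fallback(self, prompt: str) -> Optional[str]:
        # Last resort once every model in the list has failed.
        return None
```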
```python
class ChunkingStrategy:
    def __init__(self, chunk_size: int = 1000):
        self.chunk_size = chunk_size

    def chunk_content(
        self,
        content: str
    ) -> List[str]:
        # Implement smart content chunking
        return [
            content[i:i + self.chunk_size]
            for i in range(0, len(content), self.chunk_size)
        ]

    async def process_chunks(
        self,
        chunks: List[str]
    ) -> List[Dict]:
        results = []
        for chunk in chunks:
            try:
                result = await self.process_single_chunk(chunk)
                results.append(result)
            except Exception as e:
                results.append(self.handle_chunk_error(e, chunk))
        return results
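process_single_chunk and handle_chunk_error are likewise left undefined. A minimal sketch, assuming each chunk is simply summarized by a chat model, might look like this:

```python
    # Hypothetical methods for ChunkingStrategy, not from the original article.
    async def process_single_chunk(self, chunk: str) -> Dict:
        # Any per-chunk chain could go here; a plain summarization call is shown.
        llm = ChatOpenAI(model_name="gpt-3.5-turbo")
        summary = await llm.apredict(f"Summarize the following text:\n\n{chunk}")
        return {"status": "success", "summary": summary}

    def handle_chunk_error(self, error: Exception, chunk: str) -> Dict:
        # Record the failure but keep processing the remaining chunks.
        return {"status": "error", "error": str(error), "chunk_preview": chunk[:50]}
```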
Route Design Principles

- Keep the available routes distinct and describe them clearly, so the router can choose between them unambiguously.
- Have the router return a confidence score and its reasoning along with the chosen route, and send low-confidence decisions down a dedicated fallback path (the 0.8 threshold used above is one example).
Error Handling Guidelines

- Retry transient failures with exponential backoff and a bounded number of attempts.
- Register a handler per error type and keep a generic default for everything else.
- When a model call keeps failing, fall back to an alternative model before giving up, as ModelFallbackChain does.
Performance Optimization

- Split long inputs into chunks that fit the model's context window, process them individually, and collect per-chunk errors without aborting the whole job; a usage sketch follows below.
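As a usage sketch tying the chunking pieces together (assuming the per-chunk helpers sketched earlier; the document argument is a placeholder):

```python
async def process_long_document(document: str) -> List[Dict]:
    # Split the document into model-sized pieces, then process each one.
    strategy = ChunkingStrategy(chunk_size=1000)
    chunks = strategy.chunk_content(document)
    return await strategy.process_chunks(chunks)
```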
Conditional chains are essential for building robust LLM applications. The key points: route each input to the processing path best suited to it, treat low-confidence routing decisions as a signal to fall back or ask for review, recover from failures with retries, alternative models, and structured error results, and keep large inputs manageable by chunking them before processing.