import os
from langchain_openai import ChatOpenAI
from langchain.callbacks.base import BaseCallbackHandler
from ppio_sandbox.agent_runtime import AgentRuntimeApp
app = AgentRuntimeApp()

class StreamingHandler(BaseCallbackHandler):
    """Streaming callback handler that collects tokens as they arrive."""

    def __init__(self):
        self.tokens = []

    def on_llm_new_token(self, token: str, **kwargs):
        # Called by LangChain once for each newly generated token
        self.tokens.append(token)
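
# A minimal sketch of how StreamingHandler could be wired in: LangChain chat
# models accept callback handlers at construction time, and on_llm_new_token
# fires once per streamed token. Shown as a comment so this file stays
# import-safe; adapt as needed.
#
#   handler = StreamingHandler()
#   llm = ChatOpenAI(api_key=os.getenv("PPIO_API_KEY"),
#                    streaming=True,
#                    callbacks=[handler])
#   llm.invoke("Hello")
#   print(handler.tokens)  # tokens accumulated by the callback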

@app.entrypoint
def langchain_streaming_agent(request: dict):
    """Stream a LangChain LLM response chunk by chunk."""
    prompt = request.get("prompt", "")
    # Create a streaming-capable LLM client. Depending on your deployment, you
    # may also need to set base_url (and model) to PPIO's OpenAI-compatible
    # endpoint rather than the OpenAI default.
    llm = ChatOpenAI(
        api_key=os.getenv("PPIO_API_KEY"),
        streaming=True,
    )
    # Yield each non-empty content chunk as it arrives, then emit an explicit
    # end-of-stream marker so clients know the response is complete.
    for chunk in llm.stream(prompt):
        if chunk.content:
            yield {
                "chunk": chunk.content,
                "type": "content",
            }
    yield {"chunk": "", "type": "end"}