CerebrasLLMService provides access to Cerebras’s language models through an OpenAI-compatible interface. It inherits from OpenAILLMService and supports streaming responses, function calling, and context management.
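Because the service speaks the OpenAI protocol, construction mirrors `OpenAILLMService`. A minimal sketch follows; the `base_url` parameter and its value are assumptions based on the OpenAI-compatible base class and Cerebras's public endpoint, and can be omitted to use the service's default:

```python
from pipecat.services.cerebras.llm import CerebrasLLMService

llm = CerebrasLLMService(
    api_key="your-cerebras-api-key",
    model="llama-3.3-70b",
    base_url="https://api.cerebras.ai/v1",  # assumed default endpoint; optional
)
```

The full example below builds on this to configure tools, context, and a complete pipeline.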
```python
from pipecat.services.cerebras.llm import CerebrasLLMService
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
from pipecat.adapters.schemas.function_schema import FunctionSchema
from pipecat.adapters.schemas.tools_schema import ToolsSchema
from pipecat.pipeline.pipeline import Pipeline
from pipecat.pipeline.task import PipelineParams, PipelineTask
from pipecat.services.llm_service import FunctionCallParams

# Configure service
llm = CerebrasLLMService(
    api_key="your-cerebras-api-key",
    model="llama-3.3-70b"
)

# Define weather function using standardized schema
weather_function = FunctionSchema(
    name="get_current_weather",
    description="Get the current weather",
    properties={
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA"
        },
        "format": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"],
            "description": "The temperature unit to use"
        }
    },
    required=["location", "format"]
)

# Create tools schema
tools = ToolsSchema(standard_tools=[weather_function])

# Create context with system message and tools
context = OpenAILLMContext(
    messages=[
        {
            "role": "system",
            "content": """You are a helpful LLM in a WebRTC call. Your goal is to demonstrate your capabilities in a succinct way.

You have one function available:

1. get_current_weather is used to get current weather information.

Infer whether to use Fahrenheit or Celsius automatically based on the location, unless the user specifies a preference. Start by asking me for my location. Then, use 'get_current_weather' to give me a forecast. Respond to what the user said in a creative and helpful way.""",
        },
    ],
    tools=tools
)

# Register function handlers
async def fetch_weather(params: FunctionCallParams):
    await params.result_callback({"conditions": "nice", "temperature": "75"})

llm.register_function("get_current_weather", fetch_weather)

# Create context aggregator for message handling
context_aggregator = llm.create_context_aggregator(context)

# Set up pipeline
pipeline = Pipeline([
    transport.input(),
    context_aggregator.user(),
    llm,
    tts,
    transport.output(),
    context_aggregator.assistant()
])

# Create and configure task
task = PipelineTask(
    pipeline,
    params=PipelineParams(
        allow_interruptions=True,
        enable_metrics=True,
        enable_usage_metrics=True,
    ),
)
```
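Once the task is configured, it needs a runner to execute it. A minimal sketch using Pipecat's `PipelineRunner`, assuming an async entry point and that `transport` and `tts` are configured elsewhere as in the example above:

```python
import asyncio

from pipecat.pipeline.runner import PipelineRunner

async def main():
    # Drive the task until the pipeline completes or is cancelled.
    runner = PipelineRunner()
    await runner.run(task)

if __name__ == "__main__":
    asyncio.run(main())
```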
This service supports function calling (also known as tool calling), which allows the LLM to request information from external services and APIs. For example, you can enable your bot to fetch live data, such as the weather forecast shown in the example above.
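To go beyond the canned response in the example above, a handler can call a real HTTP endpoint and return the result to the model through `result_callback`. The sketch below uses `aiohttp`; the `WEATHER_API_URL` endpoint and its response shape are hypothetical placeholders, not part of Pipecat:

```python
import aiohttp

from pipecat.services.llm_service import FunctionCallParams

# Hypothetical endpoint -- substitute a real weather API of your choice.
WEATHER_API_URL = "https://api.example.com/v1/weather"

async def fetch_weather(params: FunctionCallParams):
    # Arguments arrive as a dict parsed from the model's tool call.
    location = params.arguments["location"]
    units = params.arguments["format"]

    async with aiohttp.ClientSession() as session:
        async with session.get(
            WEATHER_API_URL, params={"location": location, "units": units}
        ) as resp:
            data = await resp.json()

    # Whatever is passed to result_callback is returned to the LLM as the
    # function call result and used to generate the next response.
    await params.result_callback({
        "conditions": data.get("conditions"),
        "temperature": data.get("temperature"),
    })

llm.register_function("get_current_weather", fetch_weather)
```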