|
1 | | -from __future__ import annotations |
2 | | - |
3 | | -import abc |
4 | | -import enum |
5 | | -from collections.abc import AsyncIterator |
6 | | -from typing import TYPE_CHECKING |
7 | | - |
8 | | -from openai.types.responses.response_prompt_param import ResponsePromptParam |
9 | | - |
10 | | -from ..agent_output import AgentOutputSchemaBase |
11 | | -from ..handoffs import Handoff |
12 | | -from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent |
13 | | -from ..tool import Tool |
14 | | - |
15 | | -if TYPE_CHECKING: |
16 | | - from ..model_settings import ModelSettings |
17 | | - |
18 | | - |
19 | | -class ModelTracing(enum.Enum): |
20 | | - DISABLED = 0 |
21 | | - """Tracing is disabled entirely.""" |
22 | | - |
23 | | - ENABLED = 1 |
24 | | - """Tracing is enabled, and all data is included.""" |
25 | | - |
26 | | - ENABLED_WITHOUT_DATA = 2 |
27 | | - """Tracing is enabled, but inputs/outputs are not included.""" |
28 | | - |
29 | | - def is_disabled(self) -> bool: |
30 | | - return self == ModelTracing.DISABLED |
31 | | - |
32 | | - def include_data(self) -> bool: |
33 | | - return self == ModelTracing.ENABLED |
34 | | - |
35 | | - |
36 | | -class Model(abc.ABC): |
37 | | - """The base interface for calling an LLM.""" |
38 | | - |
39 | | - @abc.abstractmethod |
40 | | - async def get_response( |
41 | | - self, |
42 | | - system_instructions: str | None, |
43 | | - input: str | list[TResponseInputItem], |
44 | | - model_settings: ModelSettings, |
45 | | - tools: list[Tool], |
46 | | - output_schema: AgentOutputSchemaBase | None, |
47 | | - handoffs: list[Handoff], |
48 | | - tracing: ModelTracing, |
49 | | - *, |
50 | | - previous_response_id: str | None, |
51 | | - conversation_id: str | None, |
52 | | - prompt: ResponsePromptParam | None, |
53 | | - ) -> ModelResponse: |
54 | | - """Get a response from the model. |
55 | | -
|
56 | | - Args: |
57 | | - system_instructions: The system instructions to use. |
58 | | - input: The input items to the model, in OpenAI Responses format. |
59 | | - model_settings: The model settings to use. |
60 | | - tools: The tools available to the model. |
61 | | - output_schema: The output schema to use. |
62 | | - handoffs: The handoffs available to the model. |
63 | | - tracing: Tracing configuration. |
64 | | - previous_response_id: the ID of the previous response. Generally not used by the model, |
65 | | - except for the OpenAI Responses API. |
66 | | - conversation_id: The ID of the stored conversation, if any. |
67 | | - prompt: The prompt config to use for the model. |
68 | | -
|
69 | | - Returns: |
70 | | - The full model response. |
71 | | - """ |
72 | | - pass |
73 | | - |
74 | | - @abc.abstractmethod |
75 | | - def stream_response( |
76 | | - self, |
77 | | - system_instructions: str | None, |
78 | | - input: str | list[TResponseInputItem], |
79 | | - model_settings: ModelSettings, |
80 | | - tools: list[Tool], |
81 | | - output_schema: AgentOutputSchemaBase | None, |
82 | | - handoffs: list[Handoff], |
83 | | - tracing: ModelTracing, |
84 | | - *, |
85 | | - previous_response_id: str | None, |
86 | | - conversation_id: str | None, |
87 | | - prompt: ResponsePromptParam | None, |
88 | | - ) -> AsyncIterator[TResponseStreamEvent]: |
89 | | - """Stream a response from the model. |
90 | | -
|
91 | | - Args: |
92 | | - system_instructions: The system instructions to use. |
93 | | - input: The input items to the model, in OpenAI Responses format. |
94 | | - model_settings: The model settings to use. |
95 | | - tools: The tools available to the model. |
96 | | - output_schema: The output schema to use. |
97 | | - handoffs: The handoffs available to the model. |
98 | | - tracing: Tracing configuration. |
99 | | - previous_response_id: the ID of the previous response. Generally not used by the model, |
100 | | - except for the OpenAI Responses API. |
101 | | - conversation_id: The ID of the stored conversation, if any. |
102 | | - prompt: The prompt config to use for the model. |
103 | | -
|
104 | | - Returns: |
105 | | - An iterator of response stream events, in OpenAI Responses format. |
106 | | - """ |
107 | | - pass |
108 | | - |
109 | | - |
110 | | -class ModelProvider(abc.ABC): |
111 | | - """The base interface for a model provider. |
112 | | -
|
113 | | - Model provider is responsible for looking up Models by name. |
114 | | - """ |
115 | | - |
116 | | - @abc.abstractmethod |
117 | | - def get_model(self, model_name: str | None) -> Model: |
118 | | - """Get a model by name. |
119 | | -
|
120 | | - Args: |
121 | | - model_name: The name of the model to get. |
122 | | -
|
123 | | - Returns: |
124 | | - The model. |
125 | | - """ |
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +import abc |
| 4 | +import enum |
| 5 | +from collections.abc import AsyncIterator |
| 6 | +from typing import TYPE_CHECKING |
| 7 | + |
| 8 | +from openai.types.responses.response_prompt_param import ResponsePromptParam |
| 9 | + |
| 10 | +from ..agent_output import AgentOutputSchemaBase |
| 11 | +from ..handoffs import Handoff |
| 12 | +from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent |
| 13 | +from ..tool import Tool |
| 14 | + |
| 15 | +if TYPE_CHECKING: |
| 16 | + from ..model_settings import ModelSettings |
| 17 | + |
| 18 | + |
class ModelTracing(enum.Enum):
    """Controls whether model calls are traced, and how much data the traces carry."""

    DISABLED = 0
    """Tracing is disabled entirely."""

    ENABLED = 1
    """Tracing is enabled, and all data is included."""

    ENABLED_WITHOUT_DATA = 2
    """Tracing is enabled, but inputs/outputs are not included."""

    def is_disabled(self) -> bool:
        """Return True when tracing is turned off entirely."""
        # Enum members are singletons, so identity comparison is safe here.
        return self is ModelTracing.DISABLED

    def include_data(self) -> bool:
        """Return True when traces should include full input/output data."""
        return self is ModelTracing.ENABLED
| 34 | + |
| 35 | + |
class Model(abc.ABC):
    """The base interface for calling an LLM.

    Concrete backends implement both a one-shot call (``get_response``) and a
    streaming call (``stream_response``) with identical parameters.
    """

    @abc.abstractmethod
    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        *,
        previous_response_id: str | None,
        conversation_id: str | None,
        prompt: ResponsePromptParam | None,
    ) -> ModelResponse:
        """Fetch a single, complete response from the model.

        Args:
            system_instructions: The system instructions to use.
            input: The input items to the model, in OpenAI Responses format.
            model_settings: The model settings to use.
            tools: The tools available to the model.
            output_schema: The output schema to use.
            handoffs: The handoffs available to the model.
            tracing: Tracing configuration.
            previous_response_id: The ID of the previous response. Generally not used by the
                model, except for the OpenAI Responses API.
            conversation_id: The ID of the stored conversation, if any.
            prompt: The prompt config to use for the model.

        Returns:
            The full model response.
        """
        ...

    @abc.abstractmethod
    def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        *,
        previous_response_id: str | None,
        conversation_id: str | None,
        prompt: ResponsePromptParam | None,
    ) -> AsyncIterator[TResponseStreamEvent]:
        """Stream a response from the model, event by event.

        Args:
            system_instructions: The system instructions to use.
            input: The input items to the model, in OpenAI Responses format.
            model_settings: The model settings to use.
            tools: The tools available to the model.
            output_schema: The output schema to use.
            handoffs: The handoffs available to the model.
            tracing: Tracing configuration.
            previous_response_id: The ID of the previous response. Generally not used by the
                model, except for the OpenAI Responses API.
            conversation_id: The ID of the stored conversation, if any.
            prompt: The prompt config to use for the model.

        Returns:
            An iterator of response stream events, in OpenAI Responses format.
        """
        ...
| 108 | + |
| 109 | + |
class ModelProvider(abc.ABC):
    """The base interface for a model provider.

    Model provider is responsible for looking up Models by name.
    """

    @abc.abstractmethod
    def get_model(self, model_name: str | None) -> Model:
        """Look up a model by its name.

        Args:
            model_name: The name of the model to get.

        Returns:
            The model.
        """
0 commit comments