Skip to content

Commit 0f6dcc1

Browse files
committed
fix: address review comments for OpenAI Responses API model
- Add OpenAIResponsesModel to lazy loading in strands.models.__init__.py
- Remove 'Step X:' prefixes from logging to match project conventions
- Fix unused variable in test fixture by removing model_name from params
1 parent 37e719f commit 0f6dcc1

3 files changed

Lines changed: 12 additions & 8 deletions

File tree

src/strands/models/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -55,6 +55,10 @@ def __getattr__(name: str) -> Any:
5555
from .openai import OpenAIModel
5656

5757
return OpenAIModel
58+
if name == "OpenAIResponsesModel":
59+
from .openai_responses import OpenAIResponsesModel
60+
61+
return OpenAIResponsesModel
5862
if name == "SageMakerAIModel":
5963
from .sagemaker import SageMakerAIModel
6064

src/strands/models/openai_responses.py

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -198,17 +198,17 @@ async def stream(
198198
ContextWindowOverflowException: If the input exceeds the model's context window.
199199
ModelThrottledException: If the request is throttled by OpenAI (rate limits).
200200
"""
201-
logger.debug("Step 1: formatting request for OpenAI Responses API")
201+
logger.debug("formatting request for OpenAI Responses API")
202202
request = self._format_request(messages, tool_specs, system_prompt, tool_choice)
203-
logger.debug("Step 1: formatted request=<%s>", request)
203+
logger.debug("formatted request=<%s>", request)
204204

205-
logger.debug("Step 2: invoking OpenAI Responses API model")
205+
logger.debug("invoking OpenAI Responses API model")
206206

207207
async with openai.AsyncOpenAI(**self.client_args) as client:
208208
try:
209209
response = await client.responses.create(**request)
210210

211-
logger.debug("Step 3: streaming response from OpenAI Responses API model")
211+
logger.debug("streaming response from OpenAI Responses API model")
212212

213213
yield self._format_chunk({"chunk_type": "message_start"})
214214

@@ -335,7 +335,7 @@ async def stream(
335335
if final_usage:
336336
yield self._format_chunk({"chunk_type": "metadata", "data": final_usage})
337337

338-
logger.debug("Step 4: finished streaming response from OpenAI Responses API model")
338+
logger.debug("finished streaming response from OpenAI Responses API model")
339339

340340
@override
341341
async def structured_output(

tests_integ/models/test_model_openai.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -17,12 +17,12 @@
1717

1818
@pytest.fixture(
1919
params=[
20-
("openai", OpenAIModel, "gpt-4o"),
21-
("openai_responses", OpenAIResponsesModel, "gpt-4o"),
20+
(OpenAIModel, "gpt-4o"),
21+
(OpenAIResponsesModel, "gpt-4o"),
2222
]
2323
)
2424
def model(request):
25-
model_name, model_class, model_id = request.param
25+
model_class, model_id = request.param
2626
return model_class(
2727
model_id=model_id,
2828
client_args={

0 commit comments

Comments (0)