Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions scripts/populate_tox/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,9 @@
},
"litellm": {
"package": "litellm",
"deps": {
"*": ["anthropic", "google-genai"],
},
},
"litestar": {
"package": "litestar",
Expand Down
36 changes: 19 additions & 17 deletions scripts/populate_tox/package_dependencies.jsonl

Large diffs are not rendered by default.

70 changes: 35 additions & 35 deletions scripts/populate_tox/releases.jsonl

Large diffs are not rendered by default.

91 changes: 90 additions & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,18 @@
openai = None


# Optional SDK imports: each AI-integration fixture below is only usable when
# the corresponding package is installed; otherwise the module-level name is
# None so tests can detect the missing dependency and skip.
try:
    import anthropic
except ImportError:
    anthropic = None


try:
    # Import the submodule explicitly: a bare `import google` can succeed as an
    # empty namespace package (any installed google-* distribution provides it)
    # even when google-genai is absent, which would leave `google` truthy but
    # without a `genai` attribute and defeat this guard.
    import google.genai  # binds the name `google` with `genai` guaranteed
except ImportError:
    google = None


from tests import _warning_recorder, _warning_recorder_mgr

from typing import TYPE_CHECKING
Expand Down Expand Up @@ -1050,7 +1062,12 @@ def inner(response_content, serialize_pydantic=False, request_headers=None):
)

if serialize_pydantic:
response_content = json.dumps(response_content.model_dump()).encode("utf-8")
response_content = json.dumps(
response_content.model_dump(
by_alias=True,
exclude_none=True,
)
).encode("utf-8")

response = HttpxResponse(
200,
Expand Down Expand Up @@ -1177,6 +1194,30 @@ def streaming_chat_completions_model_response():
]


@pytest.fixture
def nonstreaming_chat_completions_model_response():
    """A minimal, non-streaming OpenAI Chat Completions response.

    Returns an ``openai.types.chat.ChatCompletion`` with a single assistant
    choice ("Test response", finish_reason="stop") and fixed token usage
    (10 prompt + 20 completion = 30 total), suitable for mocking the OpenAI
    client in integration tests.
    """
    # TODO(review): if more AI-integration fixtures accumulate here, move them
    # into a dedicated module loaded via ``pytest_plugins`` so they remain
    # auto-discovered without bloating conftest.py.
    return openai.types.chat.ChatCompletion(
        id="chatcmpl-test",
        choices=[
            openai.types.chat.chat_completion.Choice(
                index=0,
                finish_reason="stop",
                message=openai.types.chat.ChatCompletionMessage(
                    role="assistant", content="Test response"
                ),
            )
        ],
        created=1234567890,
        model="gpt-3.5-turbo",
        object="chat.completion",
        usage=openai.types.CompletionUsage(
            prompt_tokens=10,
            completion_tokens=20,
            total_tokens=30,
        ),
    )


@pytest.fixture
def nonstreaming_responses_model_response():
return openai.types.responses.Response(
Expand Down Expand Up @@ -1216,6 +1257,54 @@ def nonstreaming_responses_model_response():
)


@pytest.fixture
def nonstreaming_anthropic_model_response():
    """A minimal, non-streaming Anthropic Messages API response.

    Builds an ``anthropic.types.Message`` containing one text block
    ("Hello, how can I help you?", stop_reason="end_turn") and fixed token
    usage (10 in / 20 out), for mocking the Anthropic client in tests.
    """
    reply_block = anthropic.types.TextBlock(
        type="text",
        text="Hello, how can I help you?",
    )
    token_usage = anthropic.types.Usage(
        input_tokens=10,
        output_tokens=20,
    )
    return anthropic.types.Message(
        id="msg_123",
        type="message",
        role="assistant",
        model="claude-3-opus-20240229",
        content=[reply_block],
        stop_reason="end_turn",
        stop_sequence=None,
        usage=token_usage,
    )


@pytest.fixture
def nonstreaming_google_genai_model_response():
    """A minimal, non-streaming google-genai GenerateContentResponse.

    Contains one model candidate with a single text part
    ("Hello, how can I help you?", finish_reason="STOP") and fixed token
    counts (10 prompt + 20 candidate = 30 total), for mocking the
    google-genai client in tests.
    """
    genai_types = google.genai.types
    reply_part = genai_types.Part(text="Hello, how can I help you?")
    candidate = genai_types.Candidate(
        content=genai_types.Content(role="model", parts=[reply_part]),
        finish_reason="STOP",
    )
    token_usage = genai_types.GenerateContentResponseUsageMetadata(
        prompt_token_count=10,
        candidates_token_count=20,
        total_token_count=30,
    )
    return genai_types.GenerateContentResponse(
        response_id="resp_123",
        candidates=[candidate],
        model_version="gemini/gemini-pro",
        usage_metadata=token_usage,
    )


@pytest.fixture
def responses_tool_call_model_responses():
def inner(
Expand Down
Loading
Loading