|
1 | 1 | import os |
| 2 | +import time |
2 | 3 | from typing import Optional |
3 | 4 |
|
4 | 5 | import cohere |
@@ -158,6 +159,11 @@ def test_prompt_decorator( |
158 | 159 | model=model, |
159 | 160 | messages=call_llm_messages, |
160 | 161 | ) |
| 162 | + |
| 163 | + # Wait for the Prompt span to be exported; it is waiting
| 164 | + # asynchronously for the LLM provider call span to finish |
| 165 | + time.sleep(1) |
| 166 | + |
161 | 167 | # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt |
162 | 168 | spans = exporter.get_finished_spans() |
163 | 169 | assert len(spans) == 2 |
@@ -189,7 +195,13 @@ def test_prompt_decorator_with_hl_processor( |
189 | 195 | model=model, |
190 | 196 | messages=call_llm_messages, |
191 | 197 | ) |
| 198 | + |
192 | 199 | # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt |
| 200 | + |
| 201 | + # Wait for the Prompt span to be exported; it is waiting
| 202 | + # asynchronously for the LLM provider call span to finish |
| 203 | + time.sleep(1) |
| 204 | + |
193 | 205 | spans = exporter.get_finished_spans() |
194 | 206 | assert len(spans) == 2 |
195 | 207 | assert not is_humanloop_span(span=spans[0]) |
@@ -237,6 +249,11 @@ def test_prompt_decorator_with_defaults( |
237 | 249 | model=model, |
238 | 250 | messages=call_llm_messages, |
239 | 251 | ) |
| 252 | + |
| 253 | + # Wait for the Prompt span to be exported; it is waiting
| 254 | + # asynchronously for the LLM provider call span to finish |
| 255 | + time.sleep(1) |
| 256 | + |
240 | 257 | spans = exporter.get_finished_spans() |
241 | 258 | # THEN the Prompt span is enhanced with information and forms a correct PromptKernel |
242 | 259 | prompt = PromptKernelRequest.model_validate( |
@@ -289,6 +306,10 @@ def test_prompt_attributes( |
289 | 306 | messages=call_llm_messages, |
290 | 307 | ) |
291 | 308 |
|
| 309 | + # Wait for the Prompt span to be exported; it is waiting
| 310 | + # asynchronously for the LLM provider call span to finish |
| 311 | + time.sleep(1) |
| 312 | + |
292 | 313 | assert len(exporter.get_finished_spans()) == 2 |
293 | 314 |
|
294 | 315 | prompt_kernel = PromptKernelRequest.model_validate( |
|
0 commit comments