Skip to content

Commit 7d8d7de

Browse files
author
Andrei Bratu
committed
QA nits from CI
1 parent a44a987 commit 7d8d7de

File tree

5 files changed

+51
-6
lines changed

5 files changed

+51
-6
lines changed

pyproject.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
1+
[project]
2+
name = "humanloop"
3+
14
[tool.poetry]
25
name = "humanloop"
36
version = "0.8.22"

src/humanloop/otel/exporter.py

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,13 @@ def _complete_flow_log(self, span_id: int) -> None:
206206
if len(flow_children_span_ids) == 0:
207207
# All logs in the Trace have been uploaded, mark the Flow Log as complete
208208
flow_log_id = self._span_id_to_uploaded_log_id[flow_log_span_id]
209-
self._client.flows.update_log(log_id=flow_log_id, trace_status="complete")
209+
if flow_log_id is None:
210+
logger.error(
211+
"[HumanloopSpanExporter] Cannot complete Flow log %s, log ID is None",
212+
flow_log_span_id,
213+
)
214+
else:
215+
self._client.flows.update_log(log_id=flow_log_id, trace_status="complete")
210216
break
211217

212218
def _export_span_dispatch(self, span: ReadableSpan) -> None:
@@ -324,11 +330,14 @@ def _export_flow(self, span: ReadableSpan) -> None:
324330
key=HUMANLOOP_LOG_KEY,
325331
)
326332
# Spans that must be uploaded before the Flow Span is completed
327-
prerequisites = read_from_opentelemetry_span(
328-
span=span,
329-
key=HUMANLOOP_FLOW_PREREQUISITES_KEY,
330-
)
331-
self._flow_log_prerequisites[span.context.span_id] = set(prerequisites)
333+
try:
334+
prerequisites: list[int] = read_from_opentelemetry_span( # type: ignore
335+
span=span,
336+
key=HUMANLOOP_FLOW_PREREQUISITES_KEY,
337+
)
338+
self._flow_log_prerequisites[span.context.span_id] = set(prerequisites)
339+
except KeyError:
340+
self._flow_log_prerequisites[span.context.span_id] = set()
332341

333342
path: str = file_object["path"]
334343
flow: FlowKernelRequestParams

tests/utilities/test_flow_decorator.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,11 +84,15 @@ def test_decorators_without_flow(
8484
]
8585
)
8686
# WHEN exporting the spans
87+
# Wait for the prompt span to be exported; It was waiting
88+
# on the OpenAI call span to finish first
89+
time.sleep(1)
8790
spans = exporter.get_finished_spans()
8891
# THEN 3 spans arrive at the exporter in the following order:
8992
# 0. Intercepted OpenAI call, which is ignored by the exporter
9093
# 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
9194
# 2. Prompt Span
95+
print("WOW", [span.name for span in spans])
9296
assert len(spans) == 3
9397
assert read_from_opentelemetry_span(
9498
span=spans[1],
@@ -146,6 +150,10 @@ def test_flow_decorator_flow_in_flow(
146150
# WHEN Calling the _test_flow_in_flow function with specific messages
147151
_flow_over_flow(call_llm_messages)
148152

153+
# Wait for the Prompt span to be exported; It was asynchronously waiting
154+
# on the OpenAI call span to finish first
155+
time.sleep(1)
156+
149157
# THEN 5 spans arrive at the exporter in the following order:
150158
# 0. Intercepted OpenAI call, which is ignored by the exporter
151159
# 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)

tests/utilities/test_prompt_decorator.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import os
2+
import time
23
from typing import Optional
34

45
import cohere
@@ -158,6 +159,11 @@ def test_prompt_decorator(
158159
model=model,
159160
messages=call_llm_messages,
160161
)
162+
163+
# Wait for the Prompt span to be exported, it is waiting
164+
# asynchronously for the LLM provider call span to finish
165+
time.sleep(1)
166+
161167
# THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
162168
spans = exporter.get_finished_spans()
163169
assert len(spans) == 2
@@ -189,7 +195,13 @@ def test_prompt_decorator_with_hl_processor(
189195
model=model,
190196
messages=call_llm_messages,
191197
)
198+
192199
# THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
200+
201+
# Wait for the Prompt span to be exported, it is waiting
202+
# asynchronously for the LLM provider call span to finish
203+
time.sleep(1)
204+
193205
spans = exporter.get_finished_spans()
194206
assert len(spans) == 2
195207
assert not is_humanloop_span(span=spans[0])
@@ -237,6 +249,11 @@ def test_prompt_decorator_with_defaults(
237249
model=model,
238250
messages=call_llm_messages,
239251
)
252+
253+
# Wait for the Prompt span to be exported, it is waiting
254+
# asynchronously for the LLM provider call span to finish
255+
time.sleep(1)
256+
240257
spans = exporter.get_finished_spans()
241258
# THEN the Prompt span is enhanced with information and forms a correct PromptKernel
242259
prompt = PromptKernelRequest.model_validate(
@@ -289,6 +306,10 @@ def test_prompt_attributes(
289306
messages=call_llm_messages,
290307
)
291308

309+
# Wait for the Prompt span to be exported, it is waiting
310+
# asynchronously for the LLM provider call span to finish
311+
time.sleep(1)
312+
292313
assert len(exporter.get_finished_spans()) == 2
293314

294315
prompt_kernel = PromptKernelRequest.model_validate(

tests/utilities/test_tool_decorator.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import sys
2+
import time
23
from typing import Any, Optional, TypedDict, Union
34

45
import pytest
@@ -450,6 +451,9 @@ def calculator(operation: str, num1: float, num2: float) -> float:
450451
higher_order_fn_tool(operation="add", num1=1, num2=2)
451452
calculator(operation="add", num1=1, num2=2)
452453

454+
# Processor handles HL spans asynchronously, wait for them
455+
time.sleep(1)
456+
453457
assert len(spans := exporter.get_finished_spans()) == 2
454458

455459
hl_file_higher_order_fn = read_from_opentelemetry_span(

0 commit comments

Comments
 (0)