
Commit 8556149

Author: Andrei Bratu (committed)
Commit message: QA pass on python
Parent: 2da96ae

35 files changed: +768 additions, −2619 deletions

.fernignore

Lines changed: 7 additions & 7 deletions
@@ -1,19 +1,19 @@
 # Specify files that shouldn't be modified by Fern
 
-src/humanloop/eval_utils
+
+## Custom code
+
+src/humanloop/evals
 src/humanloop/prompt_utils.py
 src/humanloop/client.py
 src/humanloop/overload.py
-src/humanloop/context_variables.py
+src/humanloop/context.py
 mypy.ini
 README.md
-
-# Directories used by SDK decorators
-
-src/humanloop/utilities
+src/humanloop/decorators
 src/humanloop/otel
 
-# Tests
+## Tests
 
 tests/
 

poetry.lock

Lines changed: 13 additions & 1 deletion
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 2 additions & 3 deletions
@@ -30,9 +30,6 @@ packages = [
     { include = "humanloop", from = "src"}
 ]
 
-[tool.poetry.group.dev.dependencies]
-protobuf = "^5.29.3"
-
 [project.urls]
 Repository = 'https://github.com/humanloop/humanloop-python'
 
@@ -69,6 +66,8 @@ jsonschema = "^4.23.0"
 numpy = "<2.0.0"
 onnxruntime = "<=1.19.2"
 openai = "^1.52.2"
+protobuf = "^5.29.3"
+types-protobuf = "^5.29.1.20250208"
 pandas = "^2.2.0"
 parse-type = ">=0.6.4"
 pyarrow = "^19.0.0"

src/humanloop/client.py

Lines changed: 5 additions & 6 deletions
@@ -1,4 +1,3 @@
-from contextlib import contextmanager
 import os
 import typing
 from typing import Any, List, Optional, Sequence
@@ -10,14 +9,14 @@
 
 from humanloop.core.client_wrapper import SyncClientWrapper
 
-from humanloop.eval_utils import run_eval
-from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.evals import run_eval
+from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
 
 from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
 from humanloop.overload import overload_call, overload_log
-from humanloop.utilities.flow import flow as flow_decorator_factory
-from humanloop.utilities.prompt import prompt_decorator_factory
-from humanloop.utilities.tool import tool_decorator_factory as tool_decorator_factory
+from humanloop.decorators.flow import flow as flow_decorator_factory
+from humanloop.decorators.prompt import prompt_decorator_factory
+from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
 from humanloop.environment import HumanloopEnvironment
 from humanloop.evaluations.client import EvaluationsClient
 from humanloop.otel import instrument_provider
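
For orientation (an editor's sketch, not part of this diff): the renamed imports suggest client.py keeps exposing the decorator factories as bound methods. The method name, keyword-only signature, and the `self._tracer` attribute below are assumptions for illustration only.

```python
# Hypothetical wiring sketch; `Humanloop.flow` and `self._tracer` are assumed.
# Only the imported factory and BaseHumanloop come from the diff above.
from typing import Any, Optional

from humanloop.base_client import BaseHumanloop
from humanloop.decorators.flow import flow as flow_decorator_factory


class Humanloop(BaseHumanloop):
    def flow(self, *, path: str, attributes: Optional[dict[str, Any]] = None):
        # Hand the client and its OpenTelemetry tracer to the factory, which
        # returns the actual decorator (see decorators/flow.py below).
        return flow_decorator_factory(
            client=self,
            opentelemetry_tracer=self._tracer,  # assumed attribute
            path=path,
            attributes=attributes,
        )
```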

src/humanloop/context.py

Lines changed: 67 additions & 19 deletions
@@ -4,6 +4,7 @@
 from typing import Any, Callable, Generator, Literal, Optional
 from opentelemetry import context as context_api
 
+from humanloop.error import HumanloopRuntimeError
 from humanloop.otel.constants import (
     HUMANLOOP_CONTEXT_EVALUATION,
     HUMANLOOP_CONTEXT_DECORATOR,
@@ -12,13 +13,13 @@
 
 
 def get_trace_id() -> Optional[str]:
-    key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
-    return context_api.get_value(key=key)
+    key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident())))
+    return context_api.get_value(key=key)  # type: ignore [return-value]
 
 
 @contextmanager
 def set_trace_id(flow_log_id: str) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
+    key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident())))
     token = context_api.attach(context_api.set_value(key=key, value=flow_log_id))
     yield
     context_api.detach(token=token)
@@ -28,59 +29,106 @@ def set_trace_id(flow_log_id: str) -> Generator[None, None, None]:
 class DecoratorContext:
     path: str
     type: Literal["prompt", "tool", "flow"]
-    version: dict[str, Optional[Any]]
+    version: dict[str, Any]
 
 
 @contextmanager
-def set_decorator_context(prompt_context: DecoratorContext) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
+def set_decorator_context(
+    decorator_context: DecoratorContext,
+) -> Generator[DecoratorContext, None, None]:
+    key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())))
     reset_token = context_api.attach(
         context_api.set_value(
             key=key,
-            value=prompt_context,
+            value=decorator_context,
         )
     )
-    yield
+    yield decorator_context
    context_api.detach(token=reset_token)
 
 
 def get_decorator_context() -> Optional[DecoratorContext]:
-    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
-    return context_api.get_value(key)
+    key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())))
+    return context_api.get_value(key)  # type: ignore [return-value]
 
 
 class EvaluationContext:
     source_datapoint_id: str
     run_id: str
-    logging_callback: Callable[[str], None]
     file_id: str
     path: str
-    logging_counter: int
+    _logged: bool
+    _callback: Callable[[str], None]
+    _context_log_belongs_eval_file: bool
+
+    @property
+    def logged(self) -> bool:
+        return self._logged
+
+    @contextmanager
+    def spy_log_args(
+        self,
+        log_args: dict[str, Any],
+        path: Optional[str] = None,
+        file_id: Optional[str] = None,
+    ) -> Generator[dict[str, Any], None, None]:
+        if path is None and file_id is None:
+            raise HumanloopRuntimeError(
+                "Internal error: Evaluation context called without providing a path of file_id"
+            )
+
+        if self.path is not None and self.path == path:
+            self._logged = True
+            self._context_log_belongs_eval_file = True
+            yield {
+                **log_args,
+                "source_datapoint_id": self.source_datapoint_id,
+                "run_id": self.run_id,
+            }
+        elif self.file_id is not None and self.file_id == file_id:
+            self._logged = True
+            self._context_log_belongs_eval_file = True
+            yield {
+                **log_args,
+                "source_datapoint_id": self.source_datapoint_id,
+                "run_id": self.run_id,
+            }
+        else:
+            yield log_args
+        self._context_log_belongs_eval_file = False
+
+    @property
+    def callback(self) -> Optional[Callable[[str], None]]:
+        if self._context_log_belongs_eval_file:
+            return self._callback
+        return None
 
     def __init__(
         self,
         source_datapoint_id: str,
         run_id: str,
-        logging_callback: Callable[[str], None],
+        eval_callback: Callable[[str], None],
         file_id: str,
         path: str,
     ):
         self.source_datapoint_id = source_datapoint_id
         self.run_id = run_id
-        self.logging_callback = logging_callback
+        self._callback = eval_callback
         self.file_id = file_id
         self.path = path
-        self.logging_counter = 0
+        self._logged = False
 
 
 @contextmanager
-def set_evaluation_context(evaluation_context: EvaluationContext) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
+def set_evaluation_context(
+    evaluation_context: EvaluationContext,
+) -> Generator[None, None, None]:
+    key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())))
     reset_token = context_api.attach(context_api.set_value(key, evaluation_context))
     yield
     context_api.detach(token=reset_token)
 
 
 def get_evaluation_context() -> Optional[EvaluationContext]:
-    key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
-    return context_api.get_value(key)
+    key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())))
+    return context_api.get_value(key)  # type: ignore [return-value]
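
For orientation (an editor's sketch, not part of the commit): the helpers above are meant to be combined by the evaluation runner and the logging code roughly as follows. All IDs, paths, and the callback are illustrative placeholders.

```python
# Illustrative composition of the context helpers; values are made up.
from humanloop.context import (
    EvaluationContext,
    get_evaluation_context,
    set_evaluation_context,
)


def _on_logged(log_id: str) -> None:
    # Placeholder for the runner's callback that records the created Log.
    print(f"log created: {log_id}")


ctx = EvaluationContext(
    source_datapoint_id="dp_123",
    run_id="run_456",
    eval_callback=_on_logged,
    file_id="fl_789",
    path="demo/my-prompt",
)

with set_evaluation_context(ctx):
    current = get_evaluation_context()
    assert current is not None
    # spy_log_args enriches the log kwargs (and arms the callback) only when
    # the log targets the File under evaluation, matched by path or file_id.
    with current.spy_log_args({"output": "hi"}, path="demo/my-prompt") as log_args:
        assert log_args["run_id"] == "run_456"
    assert current.logged is True
```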

src/humanloop/decorators/flow.py

Lines changed: 133 additions & 0 deletions
@@ -0,0 +1,133 @@
+import logging
+from functools import wraps
+from typing import Any, Callable, Optional, TypeVar
+from typing_extensions import ParamSpec
+
+from opentelemetry.trace import Span, Tracer
+
+from humanloop.base_client import BaseHumanloop
+from humanloop.context import (
+    DecoratorContext,
+    get_trace_id,
+    set_decorator_context,
+    set_trace_id,
+)
+from humanloop.evals.run import HumanloopRuntimeError
+from humanloop.types.chat_message import ChatMessage
+from humanloop.decorators.helpers import bind_args
+from humanloop.evals.types import File
+from humanloop.otel.constants import (
+    HUMANLOOP_FILE_TYPE_KEY,
+    HUMANLOOP_LOG_KEY,
+    HUMANLOOP_FILE_PATH_KEY,
+    HUMANLOOP_FLOW_SPAN_NAME,
+)
+from humanloop.otel.helpers import process_output, write_to_opentelemetry_span
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.types.flow_log_response import FlowLogResponse
+
+logger = logging.getLogger("humanloop.sdk")
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def flow(
+    client: "BaseHumanloop",
+    opentelemetry_tracer: Tracer,
+    path: str,
+    attributes: Optional[dict[str, Any]] = None,
+):
+    flow_kernel = {"attributes": attributes or {}}
+
+    def decorator(func: Callable[P, R]) -> Callable[P, Optional[R]]:
+        decorator_path = path or func.__name__
+        file_type = "flow"
+
+        @wraps(func)
+        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
+            span: Span
+            with set_decorator_context(
+                DecoratorContext(
+                    path=decorator_path,
+                    type="flow",
+                    version=flow_kernel,
+                )
+            ) as decorator_context:
+                with opentelemetry_tracer.start_as_current_span(HUMANLOOP_FLOW_SPAN_NAME) as span:  # type: ignore
+                    span.set_attribute(HUMANLOOP_FILE_PATH_KEY, decorator_path)
+                    span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
+                    trace_id = get_trace_id()
+                    func_args = bind_args(func, args, kwargs)
+
+                    # Create the trace ahead so we have a parent ID to reference
+                    init_log_inputs = {
+                        "inputs": {k: v for k, v in func_args.items() if k != "messages"},
+                        "messages": func_args.get("messages"),
+                        "trace_parent_id": trace_id,
+                    }
+                    this_flow_log: FlowLogResponse = client.flows._log(  # type: ignore [attr-defined]
+                        path=decorator_context.path,
+                        flow=decorator_context.version,
+                        log_status="incomplete",
+                        **init_log_inputs,
+                    )
+
+                    with set_trace_id(this_flow_log.id):
+                        func_output: Optional[R]
+                        log_output: Optional[str]
+                        log_error: Optional[str]
+                        log_output_message: Optional[ChatMessage]
+                        try:
+                            func_output = func(*args, **kwargs)
+                            if (
+                                isinstance(func_output, dict)
+                                and len(func_output.keys()) == 2
+                                and "role" in func_output
+                                and "content" in func_output
+                            ):
+                                log_output_message = func_output  # type: ignore [assignment]
+                                log_output = None
+                            else:
+                                log_output = process_output(func=func, output=func_output)
+                                log_output_message = None
+                            log_error = None
+                        except HumanloopRuntimeError as e:
+                            # Critical error, re-raise
+                            client.logs.delete(id=this_flow_log.id)
+                            span.record_exception(e)
+                            raise e
+                        except Exception as e:
+                            logger.error(f"Error calling {func.__name__}: {e}")
+                            log_output = None
+                            log_output_message = None
+                            log_error = str(e)
+                            func_output = None
+
+                    updated_flow_log = {
+                        "log_status": "complete",
+                        "output": log_output,
+                        "error": log_error,
+                        "output_message": log_output_message,
+                        "id": this_flow_log.id,
+                    }
+                    # Write the Flow Log to the Span on HL_LOG_OT_KEY
+                    write_to_opentelemetry_span(
+                        span=span,  # type: ignore [arg-type]
+                        key=HUMANLOOP_LOG_KEY,
+                        value=updated_flow_log,  # type: ignore
+                    )
+                    # Return the output of the decorated function
+                    return func_output  # type: ignore [return-value]
+
+        wrapper.file = File(  # type: ignore
+            path=decorator_path,
+            type=file_type,  # type: ignore [arg-type, typeddict-item]
+            version=FlowDict(**flow_kernel),  # type: ignore
+            callable=wrapper,
+        )
+
+        return wrapper
+
+    return decorator
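
A brief usage sketch (editor's addition): applying the factory directly with a client and an OpenTelemetry tracer, which is what its signature above expects. The `api_key` argument and the path are placeholders; in the SDK the client is expected to call this factory on your behalf.

```python
# Usage sketch only; api_key and path are placeholder values.
from opentelemetry import trace

from humanloop.base_client import BaseHumanloop
from humanloop.decorators.flow import flow

client = BaseHumanloop(api_key="hl_sk_...")  # assumed constructor argument
tracer = trace.get_tracer("humanloop.sdk")


@flow(client=client, opentelemetry_tracer=tracer, path="qa-pass/demo-flow")
def answer(question: str) -> str:
    # The wrapper opens a flow span, creates an "incomplete" Flow Log before
    # this body runs, then marks the Log "complete" with the output or error.
    return f"echo: {question}"


print(answer("does the decorator preserve the return value?"))
```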
