Skip to content

Commit edb92f9

Browse files
jaycee-li authored and copybara-github committed
chore: Internal cleanup
FUTURE_COPYBARA_INTEGRATE_REVIEW=#6572 from googleapis:release-please--branches--main 1448130 PiperOrigin-RevId: 896138222
1 parent 9722998 commit edb92f9

2 files changed

Lines changed: 40 additions & 40 deletions

File tree

vertexai/_genai/evals.py

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -276,9 +276,6 @@ def _EvaluateInstancesRequestParameters_to_vertex(
276276
_EvaluationInstance_to_vertex(getv(from_object, ["instance"]), to_object),
277277
)
278278

279-
if getv(from_object, ["config"]) is not None:
280-
setv(to_object, ["config"], getv(from_object, ["config"]))
281-
282279
if getv(from_object, ["metric_sources"]) is not None:
283280
setv(
284281
to_object,
@@ -289,6 +286,9 @@ def _EvaluateInstancesRequestParameters_to_vertex(
289286
],
290287
)
291288

289+
if getv(from_object, ["config"]) is not None:
290+
setv(to_object, ["config"], getv(from_object, ["config"]))
291+
292292
return to_object
293293

294294

@@ -464,18 +464,18 @@ def _EvaluationRunMetric_from_vertex(
464464
if getv(from_object, ["metric"]) is not None:
465465
setv(to_object, ["metric"], getv(from_object, ["metric"]))
466466

467-
if getv(from_object, ["metricConfig"]) is not None:
467+
if getv(from_object, ["metricResourceName"]) is not None:
468468
setv(
469469
to_object,
470-
["metric_config"],
471-
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
470+
["metric_resource_name"],
471+
getv(from_object, ["metricResourceName"]),
472472
)
473473

474-
if getv(from_object, ["metricResourceName"]) is not None:
474+
if getv(from_object, ["metricConfig"]) is not None:
475475
setv(
476476
to_object,
477-
["metric_resource_name"],
478-
getv(from_object, ["metricResourceName"]),
477+
["metric_config"],
478+
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
479479
)
480480

481481
return to_object
@@ -489,18 +489,18 @@ def _EvaluationRunMetric_to_vertex(
489489
if getv(from_object, ["metric"]) is not None:
490490
setv(to_object, ["metric"], getv(from_object, ["metric"]))
491491

492-
if getv(from_object, ["metric_config"]) is not None:
492+
if getv(from_object, ["metric_resource_name"]) is not None:
493493
setv(
494494
to_object,
495-
["metricConfig"],
496-
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
495+
["metricResourceName"],
496+
getv(from_object, ["metric_resource_name"]),
497497
)
498498

499-
if getv(from_object, ["metric_resource_name"]) is not None:
499+
if getv(from_object, ["metric_config"]) is not None:
500500
setv(
501501
to_object,
502-
["metricResourceName"],
503-
getv(from_object, ["metric_resource_name"]),
502+
["metricConfig"],
503+
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
504504
)
505505

506506
return to_object
@@ -596,16 +596,16 @@ def _GenerateInstanceRubricsRequest_to_vertex(
596596
getv(from_object, ["rubric_generation_spec"]),
597597
)
598598

599-
if getv(from_object, ["config"]) is not None:
600-
setv(to_object, ["config"], getv(from_object, ["config"]))
601-
602599
if getv(from_object, ["metric_resource_name"]) is not None:
603600
setv(
604601
to_object,
605602
["metricResourceName"],
606603
getv(from_object, ["metric_resource_name"]),
607604
)
608605

606+
if getv(from_object, ["config"]) is not None:
607+
setv(to_object, ["config"], getv(from_object, ["config"]))
608+
609609
return to_object
610610

611611

@@ -1254,8 +1254,8 @@ def _evaluate_instances(
12541254
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
12551255
metrics: Optional[list[types.MetricOrDict]] = None,
12561256
instance: Optional[types.EvaluationInstanceOrDict] = None,
1257-
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
12581257
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
1258+
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
12591259
) -> types.EvaluateInstancesResponse:
12601260
"""
12611261
Evaluates instances based on a given metric.
@@ -1275,8 +1275,8 @@ def _evaluate_instances(
12751275
autorater_config=autorater_config,
12761276
metrics=metrics,
12771277
instance=instance,
1278-
config=config,
12791278
metric_sources=metric_sources,
1279+
config=config,
12801280
)
12811281

12821282
request_url_dict: Optional[dict[str, str]]
@@ -1498,8 +1498,8 @@ def _generate_rubrics(
14981498
genai_types.PredefinedMetricSpecOrDict
14991499
] = None,
15001500
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
1501-
config: Optional[types.RubricGenerationConfigOrDict] = None,
15021501
metric_resource_name: Optional[str] = None,
1502+
config: Optional[types.RubricGenerationConfigOrDict] = None,
15031503
) -> types.GenerateInstanceRubricsResponse:
15041504
"""
15051505
Generates rubrics for a given prompt.
@@ -1509,8 +1509,8 @@ def _generate_rubrics(
15091509
contents=contents,
15101510
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
15111511
rubric_generation_spec=rubric_generation_spec,
1512-
config=config,
15131512
metric_resource_name=metric_resource_name,
1513+
config=config,
15141514
)
15151515

15161516
request_url_dict: Optional[dict[str, str]]
@@ -3220,8 +3220,8 @@ async def _evaluate_instances(
32203220
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
32213221
metrics: Optional[list[types.MetricOrDict]] = None,
32223222
instance: Optional[types.EvaluationInstanceOrDict] = None,
3223-
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
32243223
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
3224+
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
32253225
) -> types.EvaluateInstancesResponse:
32263226
"""
32273227
Evaluates instances based on a given metric.
@@ -3241,8 +3241,8 @@ async def _evaluate_instances(
32413241
autorater_config=autorater_config,
32423242
metrics=metrics,
32433243
instance=instance,
3244-
config=config,
32453244
metric_sources=metric_sources,
3245+
config=config,
32463246
)
32473247

32483248
request_url_dict: Optional[dict[str, str]]
@@ -3470,8 +3470,8 @@ async def _generate_rubrics(
34703470
genai_types.PredefinedMetricSpecOrDict
34713471
] = None,
34723472
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
3473-
config: Optional[types.RubricGenerationConfigOrDict] = None,
34743473
metric_resource_name: Optional[str] = None,
3474+
config: Optional[types.RubricGenerationConfigOrDict] = None,
34753475
) -> types.GenerateInstanceRubricsResponse:
34763476
"""
34773477
Generates rubrics for a given prompt.
@@ -3481,8 +3481,8 @@ async def _generate_rubrics(
34813481
contents=contents,
34823482
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
34833483
rubric_generation_spec=rubric_generation_spec,
3484-
config=config,
34853484
metric_resource_name=metric_resource_name,
3485+
config=config,
34863486
)
34873487

34883488
request_url_dict: Optional[dict[str, str]]

vertexai/_genai/types/common.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2228,13 +2228,13 @@ class EvaluationRunMetric(_common.BaseModel):
22282228
metric: Optional[str] = Field(
22292229
default=None, description="""The name of the metric."""
22302230
)
2231-
metric_config: Optional[UnifiedMetric] = Field(
2232-
default=None, description="""The unified metric used for evaluation run."""
2233-
)
22342231
metric_resource_name: Optional[str] = Field(
22352232
default=None,
22362233
description="""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}""",
22372234
)
2235+
metric_config: Optional[UnifiedMetric] = Field(
2236+
default=None, description="""The unified metric used for evaluation run."""
2237+
)
22382238

22392239

22402240
class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2243,12 +2243,12 @@ class EvaluationRunMetricDict(TypedDict, total=False):
22432243
metric: Optional[str]
22442244
"""The name of the metric."""
22452245

2246-
metric_config: Optional[UnifiedMetricDict]
2247-
"""The unified metric used for evaluation run."""
2248-
22492246
metric_resource_name: Optional[str]
22502247
"""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}"""
22512248

2249+
metric_config: Optional[UnifiedMetricDict]
2250+
"""The unified metric used for evaluation run."""
2251+
22522252

22532253
EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]
22542254

@@ -4412,10 +4412,10 @@ class _EvaluateInstancesRequestParameters(_common.BaseModel):
44124412
instance: Optional[EvaluationInstance] = Field(
44134413
default=None, description="""The instance to be evaluated."""
44144414
)
4415-
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
44164415
metric_sources: Optional[list[MetricSource]] = Field(
44174416
default=None, description="""The metrics used for evaluation."""
44184417
)
4418+
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
44194419

44204420

44214421
class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
@@ -4462,12 +4462,12 @@ class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
44624462
instance: Optional[EvaluationInstanceDict]
44634463
"""The instance to be evaluated."""
44644464

4465-
config: Optional[EvaluateInstancesConfigDict]
4466-
""""""
4467-
44684465
metric_sources: Optional[list[MetricSourceDict]]
44694466
"""The metrics used for evaluation."""
44704467

4468+
config: Optional[EvaluateInstancesConfigDict]
4469+
""""""
4470+
44714471

44724472
_EvaluateInstancesRequestParametersOrDict = Union[
44734473
_EvaluateInstancesRequestParameters, _EvaluateInstancesRequestParametersDict
@@ -5162,11 +5162,11 @@ class _GenerateInstanceRubricsRequest(_common.BaseModel):
51625162
default=None,
51635163
description="""Specification for how the rubrics should be generated.""",
51645164
)
5165-
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
51665165
metric_resource_name: Optional[str] = Field(
51675166
default=None,
51685167
description="""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored.""",
51695168
)
5169+
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
51705170

51715171

51725172
class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
@@ -5186,12 +5186,12 @@ class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
51865186
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecDict]
51875187
"""Specification for how the rubrics should be generated."""
51885188

5189-
config: Optional[RubricGenerationConfigDict]
5190-
""""""
5191-
51925189
metric_resource_name: Optional[str]
51935190
"""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored."""
51945191

5192+
config: Optional[RubricGenerationConfigDict]
5193+
""""""
5194+
51955195

51965196
_GenerateInstanceRubricsRequestOrDict = Union[
51975197
_GenerateInstanceRubricsRequest, _GenerateInstanceRubricsRequestDict

0 commit comments

Comments (0)