Skip to content

Commit 0aa95d6

Browse files
jaycee-li authored and copybara-github committed
chore: Internal cleanup
PiperOrigin-RevId: 896138222
1 parent 9ed3759 commit 0aa95d6

2 files changed

Lines changed: 40 additions & 40 deletions

File tree

vertexai/_genai/evals.py

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -276,9 +276,6 @@ def _EvaluateInstancesRequestParameters_to_vertex(
276276
_EvaluationInstance_to_vertex(getv(from_object, ["instance"]), to_object),
277277
)
278278

279-
if getv(from_object, ["config"]) is not None:
280-
setv(to_object, ["config"], getv(from_object, ["config"]))
281-
282279
if getv(from_object, ["metric_sources"]) is not None:
283280
setv(
284281
to_object,
@@ -289,6 +286,9 @@ def _EvaluateInstancesRequestParameters_to_vertex(
289286
],
290287
)
291288

289+
if getv(from_object, ["config"]) is not None:
290+
setv(to_object, ["config"], getv(from_object, ["config"]))
291+
292292
return to_object
293293

294294

@@ -450,18 +450,18 @@ def _EvaluationRunMetric_from_vertex(
450450
if getv(from_object, ["metric"]) is not None:
451451
setv(to_object, ["metric"], getv(from_object, ["metric"]))
452452

453-
if getv(from_object, ["metricConfig"]) is not None:
453+
if getv(from_object, ["metricResourceName"]) is not None:
454454
setv(
455455
to_object,
456-
["metric_config"],
457-
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
456+
["metric_resource_name"],
457+
getv(from_object, ["metricResourceName"]),
458458
)
459459

460-
if getv(from_object, ["metricResourceName"]) is not None:
460+
if getv(from_object, ["metricConfig"]) is not None:
461461
setv(
462462
to_object,
463-
["metric_resource_name"],
464-
getv(from_object, ["metricResourceName"]),
463+
["metric_config"],
464+
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
465465
)
466466

467467
return to_object
@@ -475,18 +475,18 @@ def _EvaluationRunMetric_to_vertex(
475475
if getv(from_object, ["metric"]) is not None:
476476
setv(to_object, ["metric"], getv(from_object, ["metric"]))
477477

478-
if getv(from_object, ["metric_config"]) is not None:
478+
if getv(from_object, ["metric_resource_name"]) is not None:
479479
setv(
480480
to_object,
481-
["metricConfig"],
482-
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
481+
["metricResourceName"],
482+
getv(from_object, ["metric_resource_name"]),
483483
)
484484

485-
if getv(from_object, ["metric_resource_name"]) is not None:
485+
if getv(from_object, ["metric_config"]) is not None:
486486
setv(
487487
to_object,
488-
["metricResourceName"],
489-
getv(from_object, ["metric_resource_name"]),
488+
["metricConfig"],
489+
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
490490
)
491491

492492
return to_object
@@ -582,16 +582,16 @@ def _GenerateInstanceRubricsRequest_to_vertex(
582582
getv(from_object, ["rubric_generation_spec"]),
583583
)
584584

585-
if getv(from_object, ["config"]) is not None:
586-
setv(to_object, ["config"], getv(from_object, ["config"]))
587-
588585
if getv(from_object, ["metric_resource_name"]) is not None:
589586
setv(
590587
to_object,
591588
["metricResourceName"],
592589
getv(from_object, ["metric_resource_name"]),
593590
)
594591

592+
if getv(from_object, ["config"]) is not None:
593+
setv(to_object, ["config"], getv(from_object, ["config"]))
594+
595595
return to_object
596596

597597

@@ -1240,8 +1240,8 @@ def _evaluate_instances(
12401240
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
12411241
metrics: Optional[list[types.MetricOrDict]] = None,
12421242
instance: Optional[types.EvaluationInstanceOrDict] = None,
1243-
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
12441243
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
1244+
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
12451245
) -> types.EvaluateInstancesResponse:
12461246
"""
12471247
Evaluates instances based on a given metric.
@@ -1261,8 +1261,8 @@ def _evaluate_instances(
12611261
autorater_config=autorater_config,
12621262
metrics=metrics,
12631263
instance=instance,
1264-
config=config,
12651264
metric_sources=metric_sources,
1265+
config=config,
12661266
)
12671267

12681268
request_url_dict: Optional[dict[str, str]]
@@ -1484,8 +1484,8 @@ def _generate_rubrics(
14841484
genai_types.PredefinedMetricSpecOrDict
14851485
] = None,
14861486
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
1487-
config: Optional[types.RubricGenerationConfigOrDict] = None,
14881487
metric_resource_name: Optional[str] = None,
1488+
config: Optional[types.RubricGenerationConfigOrDict] = None,
14891489
) -> types.GenerateInstanceRubricsResponse:
14901490
"""
14911491
Generates rubrics for a given prompt.
@@ -1495,8 +1495,8 @@ def _generate_rubrics(
14951495
contents=contents,
14961496
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
14971497
rubric_generation_spec=rubric_generation_spec,
1498-
config=config,
14991498
metric_resource_name=metric_resource_name,
1499+
config=config,
15001500
)
15011501

15021502
request_url_dict: Optional[dict[str, str]]
@@ -3167,8 +3167,8 @@ async def _evaluate_instances(
31673167
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
31683168
metrics: Optional[list[types.MetricOrDict]] = None,
31693169
instance: Optional[types.EvaluationInstanceOrDict] = None,
3170-
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
31713170
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
3171+
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
31723172
) -> types.EvaluateInstancesResponse:
31733173
"""
31743174
Evaluates instances based on a given metric.
@@ -3188,8 +3188,8 @@ async def _evaluate_instances(
31883188
autorater_config=autorater_config,
31893189
metrics=metrics,
31903190
instance=instance,
3191-
config=config,
31923191
metric_sources=metric_sources,
3192+
config=config,
31933193
)
31943194

31953195
request_url_dict: Optional[dict[str, str]]
@@ -3417,8 +3417,8 @@ async def _generate_rubrics(
34173417
genai_types.PredefinedMetricSpecOrDict
34183418
] = None,
34193419
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
3420-
config: Optional[types.RubricGenerationConfigOrDict] = None,
34213420
metric_resource_name: Optional[str] = None,
3421+
config: Optional[types.RubricGenerationConfigOrDict] = None,
34223422
) -> types.GenerateInstanceRubricsResponse:
34233423
"""
34243424
Generates rubrics for a given prompt.
@@ -3428,8 +3428,8 @@ async def _generate_rubrics(
34283428
contents=contents,
34293429
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
34303430
rubric_generation_spec=rubric_generation_spec,
3431-
config=config,
34323431
metric_resource_name=metric_resource_name,
3432+
config=config,
34333433
)
34343434

34353435
request_url_dict: Optional[dict[str, str]]

vertexai/_genai/types/common.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2217,13 +2217,13 @@ class EvaluationRunMetric(_common.BaseModel):
22172217
metric: Optional[str] = Field(
22182218
default=None, description="""The name of the metric."""
22192219
)
2220-
metric_config: Optional[UnifiedMetric] = Field(
2221-
default=None, description="""The unified metric used for evaluation run."""
2222-
)
22232220
metric_resource_name: Optional[str] = Field(
22242221
default=None,
22252222
description="""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}""",
22262223
)
2224+
metric_config: Optional[UnifiedMetric] = Field(
2225+
default=None, description="""The unified metric used for evaluation run."""
2226+
)
22272227

22282228

22292229
class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2232,12 +2232,12 @@ class EvaluationRunMetricDict(TypedDict, total=False):
22322232
metric: Optional[str]
22332233
"""The name of the metric."""
22342234

2235-
metric_config: Optional[UnifiedMetricDict]
2236-
"""The unified metric used for evaluation run."""
2237-
22382235
metric_resource_name: Optional[str]
22392236
"""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}"""
22402237

2238+
metric_config: Optional[UnifiedMetricDict]
2239+
"""The unified metric used for evaluation run."""
2240+
22412241

22422242
EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]
22432243

@@ -4166,10 +4166,10 @@ class _EvaluateInstancesRequestParameters(_common.BaseModel):
41664166
instance: Optional[EvaluationInstance] = Field(
41674167
default=None, description="""The instance to be evaluated."""
41684168
)
4169-
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
41704169
metric_sources: Optional[list[MetricSource]] = Field(
41714170
default=None, description="""The metrics used for evaluation."""
41724171
)
4172+
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
41734173

41744174

41754175
class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
@@ -4216,12 +4216,12 @@ class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
42164216
instance: Optional[EvaluationInstanceDict]
42174217
"""The instance to be evaluated."""
42184218

4219-
config: Optional[EvaluateInstancesConfigDict]
4220-
""""""
4221-
42224219
metric_sources: Optional[list[MetricSourceDict]]
42234220
"""The metrics used for evaluation."""
42244221

4222+
config: Optional[EvaluateInstancesConfigDict]
4223+
""""""
4224+
42254225

42264226
_EvaluateInstancesRequestParametersOrDict = Union[
42274227
_EvaluateInstancesRequestParameters, _EvaluateInstancesRequestParametersDict
@@ -5124,11 +5124,11 @@ class _GenerateInstanceRubricsRequest(_common.BaseModel):
51245124
default=None,
51255125
description="""Specification for how the rubrics should be generated.""",
51265126
)
5127-
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
51285127
metric_resource_name: Optional[str] = Field(
51295128
default=None,
51305129
description="""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored.""",
51315130
)
5131+
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
51325132

51335133

51345134
class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
@@ -5148,12 +5148,12 @@ class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
51485148
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecDict]
51495149
"""Specification for how the rubrics should be generated."""
51505150

5151-
config: Optional[RubricGenerationConfigDict]
5152-
""""""
5153-
51545151
metric_resource_name: Optional[str]
51555152
"""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored."""
51565153

5154+
config: Optional[RubricGenerationConfigDict]
5155+
""""""
5156+
51575157

51585158
_GenerateInstanceRubricsRequestOrDict = Union[
51595159
_GenerateInstanceRubricsRequest, _GenerateInstanceRubricsRequestDict

0 commit comments

Comments (0)