
Commit 8eaeb45

Merge branch 'main' into fix/uri_params

2 parents: cc5be3f + 6c1dae7

File tree: 6 files changed (+103, -100 lines)

google/genai/_live_converters.py

Lines changed: 5 additions & 0 deletions

@@ -226,6 +226,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))
 
+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object
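The converter follows the file's usual getv/setv pattern: read a value from the request object by key path and copy it into the wire-format object only when it is present. A minimal standalone sketch of that pattern (hypothetical helpers, not the SDK's actual _common.getv/_common.setv):

def getv(obj: dict, path: list[str]):
  # Walk the key path, returning None as soon as a key is missing.
  for key in path:
    if not isinstance(obj, dict) or key not in obj:
      return None
    obj = obj[key]
  return obj

def setv(obj: dict, path: list[str], value) -> None:
  # Create intermediate dicts as needed, then set the final key.
  for key in path[:-1]:
    obj = obj.setdefault(key, {})
  obj[path[-1]] = value

from_object = {'top_p': 0.95}
to_object: dict = {}
if getv(from_object, ['top_p']) is not None:
  setv(to_object, ['topP'], getv(from_object, ['top_p']))
print(to_object)  # {'topP': 0.95}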

google/genai/models.py

Lines changed: 5 additions & 0 deletions

@@ -2398,6 +2398,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))
 
+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object

google/genai/tests/live/test_live.py

Lines changed: 10 additions & 2 deletions

@@ -720,7 +720,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai):
   result = await get_connect_message(
       mock_api_client(vertexai=vertexai), model='test_model', config=config_dict
   )
-  assert result == expected_result
+  assert types.LiveClientMessage._from_response(
+      response=result, kwargs=None
+  ) == types.LiveClientMessage._from_response(
+      response=expected_result, kwargs=None
+  )
   # Config is a LiveConnectConfig
   config = types.LiveConnectConfig(
       speech_config=types.SpeechConfig(
@@ -745,7 +749,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai):
       mock_api_client(vertexai=vertexai),
       model='test_model', config=config
   )
-  assert result == expected_result
+  assert types.LiveClientMessage._from_response(
+      response=result, kwargs=None
+  ) == types.LiveClientMessage._from_response(
+      response=expected_result, kwargs=None
+  )
 
 
 @pytest.mark.parametrize('vertexai', [True, False])
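The test no longer compares the raw connect message against a literal dict; both sides are first round-tripped through types.LiveClientMessage._from_response, so the comparison happens between validated model instances rather than raw dicts. A hedged sketch of that normalize-then-compare idea, using a stand-in Pydantic model instead of the SDK's LiveClientMessage:

from typing import Optional
from pydantic import BaseModel

class Setup(BaseModel):
  model: Optional[str] = None

class ClientMessage(BaseModel):
  setup: Optional[Setup] = None

raw = {'setup': {'model': 'test_model'}}
expected = {'setup': {'model': 'test_model'}}
# Validation normalizes both sides into the same model type before comparing.
assert ClientMessage.model_validate(raw) == ClientMessage.model_validate(expected)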

google/genai/tests/operations/test_get.py

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-# Copyright 2025 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

The removed and re-added header lines are textually identical here, so the change appears to be whitespace-only.

google/genai/tunings.py

Lines changed: 0 additions & 30 deletions

@@ -551,36 +551,6 @@ def _TuningJob_from_mldev(
       _TunedModel_from_mldev(getv(from_object, ['_self']), to_object),
   )
 
-  if getv(from_object, ['customBaseModel']) is not None:
-    setv(
-        to_object, ['custom_base_model'], getv(from_object, ['customBaseModel'])
-    )
-
-  if getv(from_object, ['experiment']) is not None:
-    setv(to_object, ['experiment'], getv(from_object, ['experiment']))
-
-  if getv(from_object, ['labels']) is not None:
-    setv(to_object, ['labels'], getv(from_object, ['labels']))
-
-  if getv(from_object, ['outputUri']) is not None:
-    setv(to_object, ['output_uri'], getv(from_object, ['outputUri']))
-
-  if getv(from_object, ['pipelineJob']) is not None:
-    setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))
-
-  if getv(from_object, ['serviceAccount']) is not None:
-    setv(to_object, ['service_account'], getv(from_object, ['serviceAccount']))
-
-  if getv(from_object, ['tunedModelDisplayName']) is not None:
-    setv(
-        to_object,
-        ['tuned_model_display_name'],
-        getv(from_object, ['tunedModelDisplayName']),
-    )
-
-  if getv(from_object, ['veoTuningSpec']) is not None:
-    setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec']))
-
   return to_object
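These branches copied Vertex-style camelCase response keys onto the SDK's snake_case TuningJob fields on the Gemini Developer API (mldev) path; removing them drops that mapping from _TuningJob_from_mldev. A small illustration of the renaming they performed (field names come from the deleted code, the values are hypothetical):

api_response = {
    'tunedModelDisplayName': 'my-tuned-model',
    'labels': {'team': 'research'},
}
sdk_fields = {
    # camelCase API key -> snake_case SDK field, only when present.
    'tuned_model_display_name': api_response.get('tunedModelDisplayName'),
    'labels': api_response.get('labels'),
}
print(sdk_fields)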

google/genai/types.py

Lines changed: 82 additions & 67 deletions

@@ -118,6 +118,19 @@ class Language(_common.CaseInSensitiveEnum):
   """Python >= 3.10, with numpy and simpy available."""
 
 
+class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
+  """Specifies how the response should be scheduled in the conversation."""
+
+  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
+  """This value is unused."""
+  SILENT = 'SILENT'
+  """Only add the result to the conversation context, do not interrupt or trigger generation."""
+  WHEN_IDLE = 'WHEN_IDLE'
+  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
+  INTERRUPT = 'INTERRUPT'
+  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
+
+
 class Type(_common.CaseInSensitiveEnum):
   """Optional. The type of the data."""
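The enum itself is not new: the identical block is deleted from its later position in a following hunk, so this is a move toward the top of the module. For context, a hedged sketch of how a Live API tool response might carry the value (the scheduling field on types.FunctionResponse is assumed here, it is not part of this hunk):

from google.genai import types

tool_response = types.FunctionResponse(
    id='fc-1',                       # hypothetical call id
    name='get_weather',              # hypothetical tool name
    response={'temperature_c': 21},
    # WHEN_IDLE: add the result to the context and answer once the model
    # is not busy generating something else.
    scheduling=types.FunctionResponseScheduling.WHEN_IDLE,
)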

@@ -144,14 +157,14 @@ class HarmCategory(_common.CaseInSensitiveEnum):
 
   HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED'
   """The harm category is unspecified."""
-  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
-  """The harm category is hate speech."""
-  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
-  """The harm category is dangerous content."""
   HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT'
   """The harm category is harassment."""
+  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
+  """The harm category is hate speech."""
   HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
   """The harm category is sexually explicit content."""
+  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
+  """The harm category is dangerous content."""
   HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
   """Deprecated: Election filter is not longer supported. The harm category is civic integrity."""
   HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE'
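This hunk only reorders the enum members; names and string values are unchanged, so existing callers are unaffected. For orientation, a hedged sketch of how a category is typically paired with a threshold (types.SafetySetting and types.HarmBlockThreshold are assumed here, they are not part of this hunk):

from google.genai import types

safety_setting = types.SafetySetting(
    category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold=types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
)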
@@ -702,19 +715,6 @@ class MediaModality(_common.CaseInSensitiveEnum):
   """Document, e.g. PDF."""
 
 
-class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
-  """Specifies how the response should be scheduled in the conversation."""
-
-  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
-  """This value is unused."""
-  SILENT = 'SILENT'
-  """Only add the result to the conversation context, do not interrupt or trigger generation."""
-  WHEN_IDLE = 'WHEN_IDLE'
-  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
-  INTERRUPT = 'INTERRUPT'
-  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
-
-
 class StartSensitivity(_common.CaseInSensitiveEnum):
   """Start of speech sensitivity."""

@@ -2672,8 +2672,7 @@ class GoogleSearch(_common.BaseModel):
   )
   exclude_domains: Optional[list[str]] = Field(
       default=None,
-      description="""Optional. List of domains to be excluded from the search results.
-      The default limit is 2000 domains.""",
+      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""",
   )
 
 
@@ -2686,8 +2685,7 @@ class GoogleSearchDict(TypedDict, total=False):
   """
 
   exclude_domains: Optional[list[str]]
-  """Optional. List of domains to be excluded from the search results.
-  The default limit is 2000 domains."""
+  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]."""
 
 
 GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]
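The docstring change folds the two-line description into one line and adds an example of the expected values. A hedged usage sketch (the google_search field on types.Tool is assumed here, it is not part of this hunk):

from google.genai import types

search_tool = types.Tool(
    google_search=types.GoogleSearch(
        # Domains the search step should skip; the docstring caps the list
        # at 2000 entries.
        exclude_domains=['amazon.com', 'facebook.com'],
    )
)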
@@ -8304,34 +8302,6 @@ class DeleteModelResponseDict(TypedDict, total=False):
 DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]
 
 
-class GenerationConfigThinkingConfig(_common.BaseModel):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool] = Field(
-      default=None,
-      description="""Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available.""",
-  )
-  thinking_budget: Optional[int] = Field(
-      default=None,
-      description="""Optional. Indicates the thinking budget in tokens.""",
-  )
-
-
-class GenerationConfigThinkingConfigDict(TypedDict, total=False):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool]
-  """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""
-
-  thinking_budget: Optional[int]
-  """Optional. Indicates the thinking budget in tokens."""
-
-
-GenerationConfigThinkingConfigOrDict = Union[
-    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
-]
-
-
 class GenerationConfig(_common.BaseModel):
   """Generation config."""

@@ -8400,7 +8370,7 @@ class GenerationConfig(_common.BaseModel):
       default=None,
       description="""Optional. Controls the randomness of predictions.""",
   )
-  thinking_config: Optional[GenerationConfigThinkingConfig] = Field(
+  thinking_config: Optional[ThinkingConfig] = Field(
       default=None,
       description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""",
   )
@@ -8412,6 +8382,10 @@
       default=None,
       description="""Optional. If specified, nucleus sampling will be used.""",
   )
+  enable_enhanced_civic_answers: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Enables enhanced civic answers. It may not be available for all models.""",
+  )
 
 
 class GenerationConfigDict(TypedDict, total=False):
@@ -8474,7 +8448,7 @@ class GenerationConfigDict(TypedDict, total=False):
   temperature: Optional[float]
   """Optional. Controls the randomness of predictions."""
 
-  thinking_config: Optional[GenerationConfigThinkingConfigDict]
+  thinking_config: Optional[ThinkingConfigDict]
   """Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking."""
 
   top_k: Optional[float]
@@ -8483,6 +8457,9 @@
   top_p: Optional[float]
   """Optional. If specified, nucleus sampling will be used."""
 
+  enable_enhanced_civic_answers: Optional[bool]
+  """Optional. Enables enhanced civic answers. It may not be available for all models."""
+
 
 GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict]
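Both the model class and its TypedDict mirror now reference the shared ThinkingConfig/ThinkingConfigDict types (instead of the removed GenerationConfigThinkingConfig copies) and gain the enable_enhanced_civic_answers flag. A hedged sketch of passing a thinking config through a request config (GenerateContentConfig is assumed here, it is not part of this hunk):

from google.genai import types

config = types.GenerateContentConfig(
    thinking_config=types.ThinkingConfig(
        include_thoughts=True,   # return thought summaries when available
        thinking_budget=1024,    # token budget for the thinking phase
    ),
)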

@@ -9335,14 +9312,22 @@ class TunedModelCheckpointDict(TypedDict, total=False):
 
 
 class TunedModel(_common.BaseModel):
+  """TunedModel for the Tuned Model of a Tuning Job."""
 
   model: Optional[str] = Field(
       default=None,
-      description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""",
+      description="""Output only. The resource name of the TunedModel.
+      Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+      When tuning from a base model, the version_id will be 1.
+      For continuous tuning, the version id will be incremented by 1 from the
+      last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+      """,
   )
   endpoint: Optional[str] = Field(
       default=None,
-      description="""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""",
+      description="""Output only. A resource name of an Endpoint.
+      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+      """,
   )
   checkpoints: Optional[list[TunedModelCheckpoint]] = Field(
       default=None,
@@ -9353,12 +9338,20 @@ class TunedModel(_common.BaseModel):
 
 
 class TunedModelDict(TypedDict, total=False):
+  """TunedModel for the Tuned Model of a Tuning Job."""
 
   model: Optional[str]
-  """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`"""
+  """Output only. The resource name of the TunedModel.
+  Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+  When tuning from a base model, the version_id will be 1.
+  For continuous tuning, the version id will be incremented by 1 from the
+  last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+  """
 
   endpoint: Optional[str]
-  """Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`."""
+  """Output only. A resource name of an Endpoint.
+  Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+  """
 
   checkpoints: Optional[list[TunedModelCheckpointDict]]
   """The checkpoints associated with this TunedModel.
@@ -10829,22 +10822,24 @@ class _CancelTuningJobParametersDict(TypedDict, total=False):
 
 
 class TuningExample(_common.BaseModel):
+  """A single example for tuning."""
 
-  text_input: Optional[str] = Field(
-      default=None, description="""Text model input."""
-  )
   output: Optional[str] = Field(
-      default=None, description="""The expected model output."""
+      default=None, description="""Required. The expected model output."""
+  )
+  text_input: Optional[str] = Field(
+      default=None, description="""Optional. Text model input."""
   )
 
 
 class TuningExampleDict(TypedDict, total=False):
-
-  text_input: Optional[str]
-  """Text model input."""
+  """A single example for tuning."""
 
   output: Optional[str]
-  """The expected model output."""
+  """Required. The expected model output."""
+
+  text_input: Optional[str]
+  """Optional. Text model input."""
 
 
 TuningExampleOrDict = Union[TuningExample, TuningExampleDict]
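Besides the new docstrings, the fields are reordered and labeled Required/Optional; names and types are unchanged. A hedged sketch of building a small inline dataset from such examples (types.TuningDataset is assumed here, it is not part of this hunk):

from google.genai import types

dataset = types.TuningDataset(
    examples=[
        types.TuningExample(
            text_input='Why is the sky blue?',
            output='Molecules in the air scatter blue light more than red.',
        ),
    ]
)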
@@ -11656,10 +11651,11 @@ class ListFilesResponse(_common.BaseModel):
       default=None, description="""Used to retain the full HTTP response."""
   )
   next_page_token: Optional[str] = Field(
-      default=None, description="""A token to retrieve next page of results."""
+      default=None,
+      description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""",
   )
   files: Optional[list[File]] = Field(
-      default=None, description="""The list of files."""
+      default=None, description="""The list of `File`s."""
   )
 
 
@@ -11670,10 +11666,10 @@ class ListFilesResponseDict(TypedDict, total=False):
   """Used to retain the full HTTP response."""
 
   next_page_token: Optional[str]
-  """A token to retrieve next page of results."""
+  """A token that can be sent as a `page_token` into a subsequent `ListFiles` call."""
 
   files: Optional[list[FileDict]]
-  """The list of files."""
+  """The list of `File`s."""
 
 
 ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict]
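The reworded docstrings make explicit that next_page_token feeds the page_token of a follow-up ListFiles call. In the SDK that loop is normally driven by the pager; a hedged sketch (client construction and page size are placeholders):

from google import genai

client = genai.Client()
# Iterating the pager issues follow-up ListFiles calls, passing the
# next_page_token described above as page_token under the hood.
for f in client.files.list(config={'page_size': 10}):
  print(f.name)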
@@ -12351,6 +12347,25 @@ def done(self) -> bool:
     return self.state.name in JOB_STATES_ENDED
 
 
+class GenerationConfigThinkingConfig(ThinkingConfig):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+class GenerationConfigThinkingConfigDict(ThinkingConfigDict):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+GenerationConfigThinkingConfigOrDict = Union[
+    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
+]
+
+
 class BatchJobDict(TypedDict, total=False):
   """Config for batches.create return value."""
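The names removed earlier in the file return here as thin subclasses of ThinkingConfig/ThinkingConfigDict, so the old spelling keeps importing and validating while its docstring points callers at the new type. A hedged compatibility sketch:

from google.genai import types

legacy = types.GenerationConfigThinkingConfig(
    include_thoughts=True,
    thinking_budget=512,
)
# Because the alias subclasses ThinkingConfig, isinstance checks against the
# new type still pass for code that uses the old name.
assert isinstance(legacy, types.ThinkingConfig)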
