diff --git a/librarian.yaml b/librarian.yaml index 21f0ff9eddcb..1400b5d2e2a6 100644 --- a/librarian.yaml +++ b/librarian.yaml @@ -16,8 +16,8 @@ version: v0.13.0 repo: googleapis/google-cloud-python sources: googleapis: - commit: 392c8aa259daf4570073220faa9e842cf1e2e173 - sha256: 2b3be6d9749cdf2a337f2c76aa08857096671ebd4b17007d09ee3752b7b10fe9 + commit: 84009fb6ad8975910a5cb62d32e7f35e48c9da02 + sha256: b82e61ef9bc042fe02c9c9113a53d207331dc7cd8c10cc0d4f4cbeb7e65f1577 default: output: packages tag_format: '{name}-v{version}' diff --git a/packages/google-apps-chat/google/apps/chat/__init__.py b/packages/google-apps-chat/google/apps/chat/__init__.py index 189d329e52a3..45fb7a15c660 100644 --- a/packages/google-apps-chat/google/apps/chat/__init__.py +++ b/packages/google-apps-chat/google/apps/chat/__init__.py @@ -82,6 +82,7 @@ ActionResponse, AttachedGif, CardWithId, + CreateMessageNotificationOptions, CreateMessageRequest, DeleteMessageRequest, Dialog, @@ -222,6 +223,7 @@ "ActionResponse", "AttachedGif", "CardWithId", + "CreateMessageNotificationOptions", "CreateMessageRequest", "DeleteMessageRequest", "Dialog", diff --git a/packages/google-apps-chat/google/apps/chat_v1/__init__.py b/packages/google-apps-chat/google/apps/chat_v1/__init__.py index 0c66ba3e1c97..52f15f6f26c2 100644 --- a/packages/google-apps-chat/google/apps/chat_v1/__init__.py +++ b/packages/google-apps-chat/google/apps/chat_v1/__init__.py @@ -84,6 +84,7 @@ ActionResponse, AttachedGif, CardWithId, + CreateMessageNotificationOptions, CreateMessageRequest, DeleteMessageRequest, Dialog, @@ -269,6 +270,7 @@ def _get_version(dependency_name): "ContextualAddOnMarkup", "CreateCustomEmojiRequest", "CreateMembershipRequest", + "CreateMessageNotificationOptions", "CreateMessageRequest", "CreateReactionRequest", "CreateSectionRequest", diff --git a/packages/google-apps-chat/google/apps/chat_v1/types/__init__.py b/packages/google-apps-chat/google/apps/chat_v1/types/__init__.py index 74f3ce5346cf..9a1f3c484828 
100644 --- a/packages/google-apps-chat/google/apps/chat_v1/types/__init__.py +++ b/packages/google-apps-chat/google/apps/chat_v1/types/__init__.py @@ -85,6 +85,7 @@ ActionResponse, AttachedGif, CardWithId, + CreateMessageNotificationOptions, CreateMessageRequest, DeleteMessageRequest, Dialog, @@ -231,6 +232,7 @@ "ActionResponse", "AttachedGif", "CardWithId", + "CreateMessageNotificationOptions", "CreateMessageRequest", "DeleteMessageRequest", "Dialog", diff --git a/packages/google-apps-chat/google/apps/chat_v1/types/message.py b/packages/google-apps-chat/google/apps/chat_v1/types/message.py index f3b658369081..d85925d9b891 100644 --- a/packages/google-apps-chat/google/apps/chat_v1/types/message.py +++ b/packages/google-apps-chat/google/apps/chat_v1/types/message.py @@ -45,6 +45,7 @@ "DeleteMessageRequest", "UpdateMessageRequest", "CreateMessageRequest", + "CreateMessageNotificationOptions", "ListMessagesRequest", "ListMessagesResponse", "DialogAction", @@ -205,6 +206,10 @@ class Message(proto.Message): If the space doesn't support reply in threads, this field is always ``false``. + silent (bool): + Output only. Whether this is a silent + message. Silent messages are messages where Chat + suppresses push notifications for recipients. client_assigned_message_id (str): Optional. A custom ID for the message. You can use field to identify a message, or to get, delete, or update a message. @@ -352,6 +357,10 @@ class Message(proto.Message): proto.BOOL, number=25, ) + silent: bool = proto.Field( + proto.BOOL, + number=46, + ) client_assigned_message_id: str = proto.Field( proto.STRING, number=32, @@ -911,6 +920,11 @@ class CreateMessageRequest(proto.Message): For details, see `Name a message `__. + create_message_notification_options (google.apps.chat_v1.types.CreateMessageNotificationOptions): + Optional. Controls the notification behavior when the + message is posted. To learn more, see `Force notifications + or send silent + messages `__. 
""" class MessageReplyOption(proto.Enum): @@ -967,6 +981,61 @@ class MessageReplyOption(proto.Enum): proto.STRING, number=9, ) + create_message_notification_options: "CreateMessageNotificationOptions" = ( + proto.Field( + proto.MESSAGE, + number=10, + message="CreateMessageNotificationOptions", + ) + ) + + +class CreateMessageNotificationOptions(proto.Message): + r"""Options for the notification behavior when the message is + posted. + + Attributes: + notification_type (google.apps.chat_v1.types.CreateMessageNotificationOptions.NotificationType): + The notification type for the message. + """ + + class NotificationType(proto.Enum): + r"""The notification types options for the message. + + Values: + NOTIFICATION_TYPE_NONE (0): + Default behavior. Notification behavior is + similar to when the human user sends the message + using the Chat UI: no notification is sent to + the human sender. + NOTIFICATION_TYPE_FORCE_NOTIFY (2): + Force notify recipients. This bypasses users' space + notification settings and `Chat Do Not Disturb + settings `__. + This option does not bypass device-level Do Not Disturb + settings. + + Requires [app authentication] + (https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + NOTIFICATION_TYPE_SILENT (3): + Silence the notification as if the recipients have `Chat Do + Not + Disturb `__ + enabled or have muted the space. + + Requires [app authentication] + (https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). 
+ """ + + NOTIFICATION_TYPE_NONE = 0 + NOTIFICATION_TYPE_FORCE_NOTIFY = 2 + NOTIFICATION_TYPE_SILENT = 3 + + notification_type: NotificationType = proto.Field( + proto.ENUM, + number=1, + enum=NotificationType, + ) class ListMessagesRequest(proto.Message): diff --git a/packages/google-apps-chat/tests/unit/gapic/chat_v1/test_chat_service.py b/packages/google-apps-chat/tests/unit/gapic/chat_v1/test_chat_service.py index 1390d4c93e40..3edc1171ab8b 100644 --- a/packages/google-apps-chat/tests/unit/gapic/chat_v1/test_chat_service.py +++ b/packages/google-apps-chat/tests/unit/gapic/chat_v1/test_chat_service.py @@ -1369,6 +1369,7 @@ def test_create_message(request_type, transport: str = "grpc"): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) response = client.create_message(request) @@ -1387,6 +1388,7 @@ def test_create_message(request_type, transport: str = "grpc"): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -1525,6 +1527,7 @@ async def test_create_message_async( fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -1544,6 +1547,7 @@ async def test_create_message_async( assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -3107,6 +3111,7 @@ def test_get_message(request_type, transport: str = "grpc"): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + 
silent=True, client_assigned_message_id="client_assigned_message_id_value", ) response = client.get_message(request) @@ -3125,6 +3130,7 @@ def test_get_message(request_type, transport: str = "grpc"): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -3257,6 +3263,7 @@ async def test_get_message_async( fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -3276,6 +3283,7 @@ async def test_get_message_async( assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -3450,6 +3458,7 @@ def test_update_message(request_type, transport: str = "grpc"): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) response = client.update_message(request) @@ -3468,6 +3477,7 @@ def test_update_message(request_type, transport: str = "grpc"): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -3596,6 +3606,7 @@ async def test_update_message_async( fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -3615,6 +3626,7 @@ async def test_update_message_async( assert response.fallback_text == 
"fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -16805,6 +16817,7 @@ def test_create_message_rest_required_fields( # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set( ( + "create_message_notification_options", "message_id", "message_reply_option", "request_id", @@ -16869,6 +16882,7 @@ def test_create_message_rest_unset_required_fields(): assert set(unset_fields) == ( set( ( + "createMessageNotificationOptions", "messageId", "messageReplyOption", "requestId", @@ -25650,6 +25664,7 @@ async def test_create_message_empty_call_grpc_asyncio(): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -25766,6 +25781,7 @@ async def test_get_message_empty_call_grpc_asyncio(): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -25799,6 +25815,7 @@ async def test_update_message_empty_call_grpc_asyncio(): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) ) @@ -27406,6 +27423,7 @@ def test_create_message_rest_call_success(request_type): ], "matched_url": {"url": "url_value"}, "thread_reply": True, + "silent": True, "client_assigned_message_id": "client_assigned_message_id_value", "emoji_reaction_summaries": [ { @@ -27513,6 +27531,7 @@ def get_message_fields(field): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) @@ -27536,6 +27555,7 @@ def 
get_message_fields(field): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -28045,6 +28065,7 @@ def test_get_message_rest_call_success(request_type): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) @@ -28068,6 +28089,7 @@ def test_get_message_rest_call_success(request_type): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" @@ -28624,6 +28646,7 @@ def test_update_message_rest_call_success(request_type): ], "matched_url": {"url": "url_value"}, "thread_reply": True, + "silent": True, "client_assigned_message_id": "client_assigned_message_id_value", "emoji_reaction_summaries": [ { @@ -28731,6 +28754,7 @@ def get_message_fields(field): fallback_text="fallback_text_value", argument_text="argument_text_value", thread_reply=True, + silent=True, client_assigned_message_id="client_assigned_message_id_value", ) @@ -28754,6 +28778,7 @@ def get_message_fields(field): assert response.fallback_text == "fallback_text_value" assert response.argument_text == "argument_text_value" assert response.thread_reply is True + assert response.silent is True assert response.client_assigned_message_id == "client_assigned_message_id_value" diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/__init__.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/__init__.py index 1af536f46611..0ed7374600c0 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/__init__.py +++ 
b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/__init__.py @@ -379,6 +379,27 @@ CancelRegionInstanceGroupManagerResizeRequestRequest, CancelRequestRemovePeeringNetworkRequest, CancelRolloutRequest, + CapacityAdviceRequest, + CapacityAdviceRequestDistributionPolicy, + CapacityAdviceRequestDistributionPolicyZoneConfiguration, + CapacityAdviceRequestInstanceFlexibilityPolicy, + CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection, + CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk, + CapacityAdviceRequestInstanceProperties, + CapacityAdviceRequestInstancePropertiesScheduling, + CapacityAdviceResponse, + CapacityAdviceResponseRecommendation, + CapacityAdviceResponseRecommendationScores, + CapacityAdviceResponseRecommendationShard, + CapacityAdviceRpcRequest, + CapacityHistoryAdviceRequest, + CapacityHistoryRequest, + CapacityHistoryRequestInstanceProperties, + CapacityHistoryRequestInstancePropertiesScheduling, + CapacityHistoryRequestLocationPolicy, + CapacityHistoryResponse, + CapacityHistoryResponsePreemptionRecord, + CapacityHistoryResponsePriceRecord, CircuitBreakers, CloneRulesFirewallPolicyRequest, CloneRulesNetworkFirewallPolicyRequest, @@ -599,6 +620,7 @@ FlexibleTimeRange, ForwardingRule, ForwardingRuleAggregatedList, + ForwardingRuleAttachedExtension, ForwardingRuleList, ForwardingRuleReference, ForwardingRuleServiceDirectoryRegistration, @@ -676,6 +698,7 @@ GetIamPolicyInstantSnapshotRequest, GetIamPolicyInterconnectAttachmentGroupRequest, GetIamPolicyInterconnectGroupRequest, + GetIamPolicyLicenseCodeRequest, GetIamPolicyLicenseRequest, GetIamPolicyMachineImageRequest, GetIamPolicyNetworkAttachmentRequest, @@ -1190,6 +1213,7 @@ InterconnectRemoteLocationPermittedConnections, InterconnectsGetDiagnosticsResponse, InterconnectsGetMacsecConfigResponse, + Interval, InvalidateCacheRegionUrlMapRequest, InvalidateCacheUrlMapRequest, Items, @@ -1370,10 +1394,12 @@ ManagedInstanceLastAttemptErrors, 
ManagedInstancePropertiesFromFlexibilityPolicy, ManagedInstanceScheduling, + ManagedInstanceShutdownDetails, ManagedInstanceVersion, Metadata, MetadataFilter, MetadataFilterLabelMatch, + Money, MoveAddressRequest, MoveDiskProjectRequest, MoveFirewallPolicyRequest, @@ -1890,6 +1916,7 @@ SetIamPolicyInstantSnapshotRequest, SetIamPolicyInterconnectAttachmentGroupRequest, SetIamPolicyInterconnectGroupRequest, + SetIamPolicyLicenseCodeRequest, SetIamPolicyLicenseRequest, SetIamPolicyMachineImageRequest, SetIamPolicyNetworkAttachmentRequest, @@ -2623,6 +2650,27 @@ def _get_version(dependency_name): "CancelRegionInstanceGroupManagerResizeRequestRequest", "CancelRequestRemovePeeringNetworkRequest", "CancelRolloutRequest", + "CapacityAdviceRequest", + "CapacityAdviceRequestDistributionPolicy", + "CapacityAdviceRequestDistributionPolicyZoneConfiguration", + "CapacityAdviceRequestInstanceFlexibilityPolicy", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk", + "CapacityAdviceRequestInstanceProperties", + "CapacityAdviceRequestInstancePropertiesScheduling", + "CapacityAdviceResponse", + "CapacityAdviceResponseRecommendation", + "CapacityAdviceResponseRecommendationScores", + "CapacityAdviceResponseRecommendationShard", + "CapacityAdviceRpcRequest", + "CapacityHistoryAdviceRequest", + "CapacityHistoryRequest", + "CapacityHistoryRequestInstanceProperties", + "CapacityHistoryRequestInstancePropertiesScheduling", + "CapacityHistoryRequestLocationPolicy", + "CapacityHistoryResponse", + "CapacityHistoryResponsePreemptionRecord", + "CapacityHistoryResponsePriceRecord", "CircuitBreakers", "CloneRulesFirewallPolicyRequest", "CloneRulesNetworkFirewallPolicyRequest", @@ -2850,6 +2898,7 @@ def _get_version(dependency_name): "FlexibleTimeRange", "ForwardingRule", "ForwardingRuleAggregatedList", + "ForwardingRuleAttachedExtension", "ForwardingRuleList", "ForwardingRuleReference", 
"ForwardingRuleServiceDirectoryRegistration", @@ -2931,6 +2980,7 @@ def _get_version(dependency_name): "GetIamPolicyInstantSnapshotRequest", "GetIamPolicyInterconnectAttachmentGroupRequest", "GetIamPolicyInterconnectGroupRequest", + "GetIamPolicyLicenseCodeRequest", "GetIamPolicyLicenseRequest", "GetIamPolicyMachineImageRequest", "GetIamPolicyNetworkAttachmentRequest", @@ -3467,6 +3517,7 @@ def _get_version(dependency_name): "InterconnectsClient", "InterconnectsGetDiagnosticsResponse", "InterconnectsGetMacsecConfigResponse", + "Interval", "InvalidateCacheRegionUrlMapRequest", "InvalidateCacheUrlMapRequest", "Items", @@ -3651,10 +3702,12 @@ def _get_version(dependency_name): "ManagedInstanceLastAttemptErrors", "ManagedInstancePropertiesFromFlexibilityPolicy", "ManagedInstanceScheduling", + "ManagedInstanceShutdownDetails", "ManagedInstanceVersion", "Metadata", "MetadataFilter", "MetadataFilterLabelMatch", + "Money", "MoveAddressRequest", "MoveDiskProjectRequest", "MoveFirewallPolicyRequest", @@ -4235,6 +4288,7 @@ def _get_version(dependency_name): "SetIamPolicyInstantSnapshotRequest", "SetIamPolicyInterconnectAttachmentGroupRequest", "SetIamPolicyInterconnectGroupRequest", + "SetIamPolicyLicenseCodeRequest", "SetIamPolicyLicenseRequest", "SetIamPolicyMachineImageRequest", "SetIamPolicyNetworkAttachmentRequest", diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/gapic_metadata.json b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/gapic_metadata.json index e69e2ffa5b72..beb3e841fe38 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/gapic_metadata.json +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/gapic_metadata.json @@ -87,6 +87,16 @@ "methods": [ "calendar_mode" ] + }, + "Capacity": { + "methods": [ + "capacity" + ] + }, + "CapacityHistory": { + "methods": [ + "capacity_history" + ] } } } @@ -2166,6 +2176,16 @@ "get" ] }, + "GetIamPolicy": { + "methods": [ + 
"get_iam_policy" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, "TestIamPermissions": { "methods": [ "test_iam_permissions" diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/client.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/client.py index 710fd0e0489a..e4f0ed1a145e 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/client.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/client.py @@ -841,6 +841,272 @@ def sample_calendar_mode(): # Done; return the response. return response + def capacity( + self, + request: Optional[Union[compute.CapacityAdviceRpcRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + capacity_advice_request_resource: Optional[ + compute.CapacityAdviceRequest + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> compute.CapacityAdviceResponse: + r"""Advice on making real-time decisions (such as + choosing zone or machine types) during deployment to + maximize your chances of obtaining capacity. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1beta + + def sample_capacity(): + # Create a client + client = compute_v1beta.AdviceClient() + + # Initialize request argument(s) + request = compute_v1beta.CapacityAdviceRpcRequest( + project="project_value", + region="region_value", + ) + + # Make the request + response = client.capacity(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1beta.types.CapacityAdviceRpcRequest, dict]): + The request object. A request message for + Advice.Capacity. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + capacity_advice_request_resource (google.cloud.compute_v1beta.types.CapacityAdviceRequest): + The body resource for this request + This corresponds to the ``capacity_advice_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.compute_v1beta.types.CapacityAdviceResponse: + A response contains scoring + recommendations. 
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [project, region, capacity_advice_request_resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, compute.CapacityAdviceRpcRequest): + request = compute.CapacityAdviceRpcRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if capacity_advice_request_resource is not None: + request.capacity_advice_request_resource = ( + capacity_advice_request_resource + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.capacity] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def capacity_history( + self, + request: Optional[Union[compute.CapacityHistoryAdviceRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + capacity_history_request_resource: Optional[ + compute.CapacityHistoryRequest + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> compute.CapacityHistoryResponse: + r"""Gets the capacity history. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1beta + + def sample_capacity_history(): + # Create a client + client = compute_v1beta.AdviceClient() + + # Initialize request argument(s) + request = compute_v1beta.CapacityHistoryAdviceRequest( + project="project_value", + region="region_value", + ) + + # Make the request + response = client.capacity_history(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1beta.types.CapacityHistoryAdviceRequest, dict]): + The request object. A request message for + Advice.CapacityHistory. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ capacity_history_request_resource (google.cloud.compute_v1beta.types.CapacityHistoryRequest): + The body resource for this request + This corresponds to the ``capacity_history_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.compute_v1beta.types.CapacityHistoryResponse: + Contains the capacity history. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [project, region, capacity_history_request_resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, compute.CapacityHistoryAdviceRequest): + request = compute.CapacityHistoryAdviceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if capacity_history_request_resource is not None: + request.capacity_history_request_resource = ( + capacity_history_request_resource + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.capacity_history] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "AdviceClient": return self diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/base.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/base.py index 37817b17179b..94979f41fc5f 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/base.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/base.py @@ -150,6 +150,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=600.0, client_info=client_info, ), + self.capacity: gapic_v1.method.wrap_method( + self.capacity, + default_timeout=600.0, + client_info=client_info, + ), + self.capacity_history: gapic_v1.method.wrap_method( + self.capacity_history, + default_timeout=600.0, + client_info=client_info, + ), } def close(self): @@ -173,6 +183,28 @@ def calendar_mode( ]: raise NotImplementedError() + @property + def capacity( + self, + ) -> Callable[ + [compute.CapacityAdviceRpcRequest], + Union[ + 
compute.CapacityAdviceResponse, Awaitable[compute.CapacityAdviceResponse] + ], + ]: + raise NotImplementedError() + + @property + def capacity_history( + self, + ) -> Callable[ + [compute.CapacityHistoryAdviceRequest], + Union[ + compute.CapacityHistoryResponse, Awaitable[compute.CapacityHistoryResponse] + ], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest.py index 50bff38bf9e3..187a9973fa05 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest.py @@ -80,6 +80,22 @@ def post_calendar_mode(self, response): logging.log(f"Received response: {response}") return response + def pre_capacity(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_capacity(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_capacity_history(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_capacity_history(self, response): + logging.log(f"Received response: {response}") + return response + transport = AdviceRestTransport(interceptor=MyCustomAdviceInterceptor()) client = AdviceClient(transport=transport) @@ -136,6 +152,104 @@ def post_calendar_mode_with_metadata( """ return response, metadata + def pre_capacity( + self, + request: compute.CapacityAdviceRpcRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + compute.CapacityAdviceRpcRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for capacity + + Override in a subclass to manipulate the request or metadata + 
before they are sent to the Advice server. + """ + return request, metadata + + def post_capacity( + self, response: compute.CapacityAdviceResponse + ) -> compute.CapacityAdviceResponse: + """Post-rpc interceptor for capacity + + DEPRECATED. Please use the `post_capacity_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the Advice server but before + it is returned to user code. This `post_capacity` interceptor runs + before the `post_capacity_with_metadata` interceptor. + """ + return response + + def post_capacity_with_metadata( + self, + response: compute.CapacityAdviceResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[compute.CapacityAdviceResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for capacity + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Advice server but before it is returned to user code. + + We recommend only using this `post_capacity_with_metadata` + interceptor in new development instead of the `post_capacity` interceptor. + When both interceptors are used, this `post_capacity_with_metadata` interceptor runs after the + `post_capacity` interceptor. The (possibly modified) response returned by + `post_capacity` will be passed to + `post_capacity_with_metadata`. + """ + return response, metadata + + def pre_capacity_history( + self, + request: compute.CapacityHistoryAdviceRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + compute.CapacityHistoryAdviceRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for capacity_history + + Override in a subclass to manipulate the request or metadata + before they are sent to the Advice server. 
+ """ + return request, metadata + + def post_capacity_history( + self, response: compute.CapacityHistoryResponse + ) -> compute.CapacityHistoryResponse: + """Post-rpc interceptor for capacity_history + + DEPRECATED. Please use the `post_capacity_history_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the Advice server but before + it is returned to user code. This `post_capacity_history` interceptor runs + before the `post_capacity_history_with_metadata` interceptor. + """ + return response + + def post_capacity_history_with_metadata( + self, + response: compute.CapacityHistoryResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + compute.CapacityHistoryResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for capacity_history + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Advice server but before it is returned to user code. + + We recommend only using this `post_capacity_history_with_metadata` + interceptor in new development instead of the `post_capacity_history` interceptor. + When both interceptors are used, this `post_capacity_history_with_metadata` interceptor runs after the + `post_capacity_history` interceptor. The (possibly modified) response returned by + `post_capacity_history` will be passed to + `post_capacity_history_with_metadata`. 
+ """ + return response, metadata + @dataclasses.dataclass class AdviceRestStub: @@ -396,6 +510,318 @@ def __call__( ) return resp + class _Capacity(_BaseAdviceRestTransport._BaseCapacity, AdviceRestStub): + def __hash__(self): + return hash("AdviceRestTransport.Capacity") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: compute.CapacityAdviceRpcRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> compute.CapacityAdviceResponse: + r"""Call the capacity method over HTTP. + + Args: + request (~.compute.CapacityAdviceRpcRequest): + The request object. A request message for + Advice.Capacity. See the method + description for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.compute.CapacityAdviceResponse: + A response contains scoring + recommendations. 
+ + """ + + http_options = _BaseAdviceRestTransport._BaseCapacity._get_http_options() + + request, metadata = self._interceptor.pre_capacity(request, metadata) + transcoded_request = ( + _BaseAdviceRestTransport._BaseCapacity._get_transcoded_request( + http_options, request + ) + ) + + body = _BaseAdviceRestTransport._BaseCapacity._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = ( + _BaseAdviceRestTransport._BaseCapacity._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.compute_v1beta.AdviceClient.Capacity", + extra={ + "serviceName": "google.cloud.compute.v1beta.Advice", + "rpcName": "Capacity", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = AdviceRestTransport._Capacity._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = compute.CapacityAdviceResponse() + pb_resp = compute.CapacityAdviceResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_capacity(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_capacity_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = compute.CapacityAdviceResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.compute_v1beta.AdviceClient.capacity", + extra={ + "serviceName": "google.cloud.compute.v1beta.Advice", + "rpcName": "Capacity", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CapacityHistory( + _BaseAdviceRestTransport._BaseCapacityHistory, AdviceRestStub + ): + def __hash__(self): + return hash("AdviceRestTransport.CapacityHistory") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: compute.CapacityHistoryAdviceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, 
Union[str, bytes]]] = (), + ) -> compute.CapacityHistoryResponse: + r"""Call the capacity history method over HTTP. + + Args: + request (~.compute.CapacityHistoryAdviceRequest): + The request object. A request message for + Advice.CapacityHistory. See the method + description for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.compute.CapacityHistoryResponse: + Contains the capacity history. + """ + + http_options = ( + _BaseAdviceRestTransport._BaseCapacityHistory._get_http_options() + ) + + request, metadata = self._interceptor.pre_capacity_history( + request, metadata + ) + transcoded_request = ( + _BaseAdviceRestTransport._BaseCapacityHistory._get_transcoded_request( + http_options, request + ) + ) + + body = _BaseAdviceRestTransport._BaseCapacityHistory._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = ( + _BaseAdviceRestTransport._BaseCapacityHistory._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.compute_v1beta.AdviceClient.CapacityHistory", + extra={ + "serviceName": "google.cloud.compute.v1beta.Advice", + 
"rpcName": "CapacityHistory", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = AdviceRestTransport._CapacityHistory._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = compute.CapacityHistoryResponse() + pb_resp = compute.CapacityHistoryResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_capacity_history(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_capacity_history_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = compute.CapacityHistoryResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.compute_v1beta.AdviceClient.capacity_history", + extra={ + "serviceName": "google.cloud.compute.v1beta.Advice", + "rpcName": "CapacityHistory", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + @property def calendar_mode( self, @@ -406,6 +832,24 @@ def calendar_mode( # In C++ this would require a dynamic_cast return self._CalendarMode(self._session, self._host, self._interceptor) # type: ignore + @property + def capacity( + self, + ) -> Callable[[compute.CapacityAdviceRpcRequest], compute.CapacityAdviceResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._Capacity(self._session, self._host, self._interceptor) # type: ignore + + @property + def capacity_history( + self, + ) -> Callable[ + [compute.CapacityHistoryAdviceRequest], compute.CapacityHistoryResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CapacityHistory(self._session, self._host, self._interceptor) # type: ignore + @property def kind(self) -> str: return "rest" diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest_base.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest_base.py index 3670d92591ab..a2d1711bbe96 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest_base.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/advice/transports/rest_base.py @@ -143,5 +143,117 @@ def _get_query_params_json(transcoded_request): return query_params + class _BaseCapacity: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/compute/beta/projects/{project}/regions/{region}/advice/capacity", + "body": "capacity_advice_request_resource", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = compute.CapacityAdviceRpcRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + 
@staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update( + _BaseAdviceRestTransport._BaseCapacity._get_unset_required_fields( + query_params + ) + ) + + return query_params + + class _BaseCapacityHistory: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/compute/beta/projects/{project}/regions/{region}/advice/capacityHistory", + "body": "capacity_history_request_resource", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = compute.CapacityHistoryAdviceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update( + _BaseAdviceRestTransport._BaseCapacityHistory._get_unset_required_fields( + query_params + ) + ) + + return query_params + 
__all__ = ("_BaseAdviceRestTransport",) diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/client.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/client.py index 83f14f444c2c..65a8b4099355 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/client.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/client.py @@ -830,6 +830,326 @@ def sample_get(): # Done; return the response. return response + def get_iam_policy( + self, + request: Optional[Union[compute.GetIamPolicyLicenseCodeRequest, dict]] = None, + *, + project: Optional[str] = None, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be empty if + no such policy or resource exists. *Caution* This resource is + intended for use only by third-party partners who are + creatingCloud Marketplace images. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1beta + + def sample_get_iam_policy(): + # Create a client + client = compute_v1beta.LicenseCodesClient() + + # Initialize request argument(s) + request = compute_v1beta.GetIamPolicyLicenseCodeRequest( + project="project_value", + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1beta.types.GetIamPolicyLicenseCodeRequest, dict]): + The request object. A request message for + LicenseCodes.GetIamPolicy. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.compute_v1beta.types.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). 
A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [project, resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, compute.GetIamPolicyLicenseCodeRequest): + request = compute.GetIamPolicyLicenseCodeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("resource", request.resource), + ) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[Union[compute.SetIamPolicyLicenseCodeRequest, dict]] = None, + *, + project: Optional[str] = None, + resource: Optional[str] = None, + global_set_policy_request_resource: Optional[ + compute.GlobalSetPolicyRequest + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified resource. 
+ Replaces any existing policy. *Caution* This resource is + intended for use only by third-party partners who are + creatingCloud Marketplace images. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1beta + + def sample_set_iam_policy(): + # Create a client + client = compute_v1beta.LicenseCodesClient() + + # Initialize request argument(s) + request = compute_v1beta.SetIamPolicyLicenseCodeRequest( + project="project_value", + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1beta.types.SetIamPolicyLicenseCodeRequest, dict]): + The request object. A request message for + LicenseCodes.SetIamPolicy. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_policy_request_resource (google.cloud.compute_v1beta.types.GlobalSetPolicyRequest): + The body resource for this request + This corresponds to the ``global_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.compute_v1beta.types.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** + + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [project, resource, global_set_policy_request_resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, compute.SetIamPolicyLicenseCodeRequest): + request = compute.SetIamPolicyLicenseCodeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_policy_request_resource is not None: + request.global_set_policy_request_resource = ( + global_set_policy_request_resource + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("resource", request.resource), + ) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def test_iam_permissions( self, request: Optional[ diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/transports/base.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/transports/base.py index 19b8d05fe152..e5b09479dfe1 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/transports/base.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/license_codes/transports/base.py @@ -40,7 +40,6 @@ class LicenseCodesTransport(abc.ABC): """Abstract transport class for LicenseCodes.""" AUTH_SCOPES = ( - "https://www.googleapis.com/auth/compute.readonly", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ) @@ -161,6 +160,26 @@ def _prep_wrapped_messages(self, client_info): default_timeout=600.0, client_info=client_info, ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=600.0, + client_info=client_info, + ), self.test_iam_permissions: gapic_v1.method.wrap_method( self.test_iam_permissions, default_timeout=600.0, @@ -186,6 +205,24 @@ def get( ]: raise NotImplementedError() + @property + def get_iam_policy( + self, + ) -> Callable[ + [compute.GetIamPolicyLicenseCodeRequest], + Union[compute.Policy, Awaitable[compute.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [compute.SetIamPolicyLicenseCodeRequest], + Union[compute.Policy, Awaitable[compute.Policy]], + ]: + raise NotImplementedError() + 
def pre_get_iam_policy(
    self,
    request: compute.GetIamPolicyLicenseCodeRequest,
    metadata: Sequence[Tuple[str, Union[str, bytes]]],
) -> Tuple[
    compute.GetIamPolicyLicenseCodeRequest, Sequence[Tuple[str, Union[str, bytes]]]
]:
    """Pre-rpc interceptor for get_iam_policy.

    Override in a subclass to manipulate the request or metadata
    before they are sent to the LicenseCodes server.
    """
    # Default implementation is a pass-through.
    return request, metadata

def post_get_iam_policy(self, response: compute.Policy) -> compute.Policy:
    """Post-rpc interceptor for get_iam_policy.

    DEPRECATED. Please use the `post_get_iam_policy_with_metadata`
    interceptor instead.

    Override in a subclass to read or manipulate the response
    after it is returned by the LicenseCodes server but before
    it is returned to user code. This `post_get_iam_policy` interceptor runs
    before the `post_get_iam_policy_with_metadata` interceptor.
    """
    return response

def post_get_iam_policy_with_metadata(
    self,
    response: compute.Policy,
    metadata: Sequence[Tuple[str, Union[str, bytes]]],
) -> Tuple[compute.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
    """Post-rpc interceptor for get_iam_policy.

    Override in a subclass to read or manipulate the response or metadata after it
    is returned by the LicenseCodes server but before it is returned to user code.

    We recommend only using this `post_get_iam_policy_with_metadata`
    interceptor in new development instead of the `post_get_iam_policy` interceptor.
    When both interceptors are used, this `post_get_iam_policy_with_metadata`
    interceptor runs after the `post_get_iam_policy` interceptor. The (possibly
    modified) response returned by `post_get_iam_policy` will be passed to
    `post_get_iam_policy_with_metadata`.
    """
    return response, metadata

def pre_set_iam_policy(
    self,
    request: compute.SetIamPolicyLicenseCodeRequest,
    metadata: Sequence[Tuple[str, Union[str, bytes]]],
) -> Tuple[
    compute.SetIamPolicyLicenseCodeRequest, Sequence[Tuple[str, Union[str, bytes]]]
]:
    """Pre-rpc interceptor for set_iam_policy.

    Override in a subclass to manipulate the request or metadata
    before they are sent to the LicenseCodes server.
    """
    return request, metadata

def post_set_iam_policy(self, response: compute.Policy) -> compute.Policy:
    """Post-rpc interceptor for set_iam_policy.

    DEPRECATED. Please use the `post_set_iam_policy_with_metadata`
    interceptor instead.

    Override in a subclass to read or manipulate the response
    after it is returned by the LicenseCodes server but before
    it is returned to user code. This `post_set_iam_policy` interceptor runs
    before the `post_set_iam_policy_with_metadata` interceptor.
    """
    return response

def post_set_iam_policy_with_metadata(
    self,
    response: compute.Policy,
    metadata: Sequence[Tuple[str, Union[str, bytes]]],
) -> Tuple[compute.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
    """Post-rpc interceptor for set_iam_policy.

    Override in a subclass to read or manipulate the response or metadata after it
    is returned by the LicenseCodes server but before it is returned to user code.

    We recommend only using this `post_set_iam_policy_with_metadata`
    interceptor in new development instead of the `post_set_iam_policy` interceptor.
    When both interceptors are used, this `post_set_iam_policy_with_metadata`
    interceptor runs after the `post_set_iam_policy` interceptor. The (possibly
    modified) response returned by `post_set_iam_policy` will be passed to
    `post_set_iam_policy_with_metadata`.
    """
    return response, metadata
class _GetIamPolicy(
    _BaseLicenseCodesRestTransport._BaseGetIamPolicy, LicenseCodesRestStub
):
    """REST backend stub for the LicenseCodes.GetIamPolicy RPC."""

    def __hash__(self):
        return hash("LicenseCodesRestTransport.GetIamPolicy")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the shared authorized
        # session. GetIamPolicy is a GET, so ``body`` is unused here.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: compute.GetIamPolicyLicenseCodeRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> compute.Policy:
        r"""Call the get iam policy method over HTTP.

        Args:
            request (~.compute.GetIamPolicyLicenseCodeRequest):
                The request object. A request message for
                LicenseCodes.GetIamPolicy. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.compute.Policy:
                An Identity and Access Management (IAM) policy: a collection
                of ``bindings``, each binding one or more principals to a
                single role, optionally constrained by a ``condition``
                expression. For the full schema (JSON/YAML examples,
                versioning, etags), see the IAM documentation:
                https://cloud.google.com/iam/docs/
        """

        http_options = (
            _BaseLicenseCodesRestTransport._BaseGetIamPolicy._get_http_options()
        )

        # Let user-supplied interceptors adjust the request before transcoding.
        request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
        transcoded_request = _BaseLicenseCodesRestTransport._BaseGetIamPolicy._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = (
            _BaseLicenseCodesRestTransport._BaseGetIamPolicy._get_query_params_json(
                transcoded_request
            )
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Logging is best-effort; never let serialization break the RPC.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.cloud.compute_v1beta.LicenseCodesClient.GetIamPolicy",
                extra={
                    "serviceName": "google.cloud.compute.v1beta.LicenseCodes",
                    "rpcName": "GetIamPolicy",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = LicenseCodesRestTransport._GetIamPolicy._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the response proto.
        resp = compute.Policy()
        pb_resp = compute.Policy.pb(resp)
        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        # Run post interceptors: deprecated form first, then *_with_metadata.
        resp = self._interceptor.post_get_iam_policy(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_get_iam_policy_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # BUG FIX: serialize the parsed proto ``resp``. The original
                # passed the raw HTTP ``response`` object, which always raised
                # (swallowed below) and logged a None payload.
                response_payload = compute.Policy.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.cloud.compute_v1beta.LicenseCodesClient.get_iam_policy",
                extra={
                    "serviceName": "google.cloud.compute.v1beta.LicenseCodes",
                    "rpcName": "GetIamPolicy",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
class _SetIamPolicy(
    _BaseLicenseCodesRestTransport._BaseSetIamPolicy, LicenseCodesRestStub
):
    """REST backend stub for the LicenseCodes.SetIamPolicy RPC."""

    def __hash__(self):
        return hash("LicenseCodesRestTransport.SetIamPolicy")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the shared authorized
        # session. SetIamPolicy is a POST carrying ``body`` as JSON.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: compute.SetIamPolicyLicenseCodeRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> compute.Policy:
        r"""Call the set iam policy method over HTTP.

        Args:
            request (~.compute.SetIamPolicyLicenseCodeRequest):
                The request object. A request message for
                LicenseCodes.SetIamPolicy. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.compute.Policy:
                The resulting Identity and Access Management (IAM) policy: a
                collection of ``bindings``, each binding one or more
                principals to a single role, optionally constrained by a
                ``condition`` expression. For the full schema (JSON/YAML
                examples, versioning, etags), see the IAM documentation:
                https://cloud.google.com/iam/docs/
        """

        http_options = (
            _BaseLicenseCodesRestTransport._BaseSetIamPolicy._get_http_options()
        )

        # Let user-supplied interceptors adjust the request before transcoding.
        request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
        transcoded_request = _BaseLicenseCodesRestTransport._BaseSetIamPolicy._get_transcoded_request(
            http_options, request
        )

        # Serialize the request body for the POST payload.
        body = (
            _BaseLicenseCodesRestTransport._BaseSetIamPolicy._get_request_body_json(
                transcoded_request
            )
        )

        # Jsonify the query params
        query_params = (
            _BaseLicenseCodesRestTransport._BaseSetIamPolicy._get_query_params_json(
                transcoded_request
            )
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Logging is best-effort; never let serialization break the RPC.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.cloud.compute_v1beta.LicenseCodesClient.SetIamPolicy",
                extra={
                    "serviceName": "google.cloud.compute.v1beta.LicenseCodes",
                    "rpcName": "SetIamPolicy",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = LicenseCodesRestTransport._SetIamPolicy._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the response proto.
        resp = compute.Policy()
        pb_resp = compute.Policy.pb(resp)
        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        # Run post interceptors: deprecated form first, then *_with_metadata.
        resp = self._interceptor.post_set_iam_policy(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_set_iam_policy_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # BUG FIX: serialize the parsed proto ``resp``. The original
                # passed the raw HTTP ``response`` object, which always raised
                # (swallowed below) and logged a None payload.
                response_payload = compute.Policy.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.cloud.compute_v1beta.LicenseCodesClient.set_iam_policy",
                extra={
                    "serviceName": "google.cloud.compute.v1beta.LicenseCodes",
                    "rpcName": "SetIamPolicy",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
class _BaseGetIamPolicy:
    """Static REST plumbing for GetIamPolicy: route template, request
    transcoding, and query-parameter serialization. Holds no state.
    """

    def __hash__(self):  # pragma: NO COVER
        return NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required fields the caller left unset.
        defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
        return {
            field: default
            for field, default in defaults.items()
            if field not in message_dict
        }

    @staticmethod
    def _get_http_options():
        # Single GET route; no request body.
        options: List[Dict[str, str]] = [
            {
                "method": "get",
                "uri": "/compute/beta/projects/{project}/global/licenseCodes/{resource}/getIamPolicy",
            },
        ]
        return options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        # Map the proto request onto the URI template and query params.
        request_pb = compute.GetIamPolicyLicenseCodeRequest.pb(request)
        return path_template.transcode(http_options, request_pb)

    @staticmethod
    def _get_query_params_json(transcoded_request):
        # Serialize query params to JSON, then fill in required defaults.
        params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=False,
            )
        )
        params.update(
            _BaseLicenseCodesRestTransport._BaseGetIamPolicy._get_unset_required_fields(
                params
            )
        )
        return params

class _BaseSetIamPolicy:
    """Static REST plumbing for SetIamPolicy: route template, request
    transcoding, body serialization, and query-parameter serialization.
    """

    def __hash__(self):  # pragma: NO COVER
        return NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required fields the caller left unset.
        defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
        return {
            field: default
            for field, default in defaults.items()
            if field not in message_dict
        }

    @staticmethod
    def _get_http_options():
        # Single POST route carrying the policy resource as the body.
        options: List[Dict[str, str]] = [
            {
                "method": "post",
                "uri": "/compute/beta/projects/{project}/global/licenseCodes/{resource}/setIamPolicy",
                "body": "global_set_policy_request_resource",
            },
        ]
        return options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        # Map the proto request onto the URI template, body, and query params.
        request_pb = compute.SetIamPolicyLicenseCodeRequest.pb(request)
        return path_template.transcode(http_options, request_pb)

    @staticmethod
    def _get_request_body_json(transcoded_request):
        # Serialize the transcoded body message to JSON for the POST payload.
        return json_format.MessageToJson(
            transcoded_request["body"], use_integers_for_enums=False
        )

    @staticmethod
    def _get_query_params_json(transcoded_request):
        # Serialize query params to JSON, then fill in required defaults.
        params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=False,
            )
        )
        params.update(
            _BaseLicenseCodesRestTransport._BaseSetIamPolicy._get_unset_required_fields(
                params
            )
        )
        return params
b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/services/zone_vm_extension_policies/client.py @@ -729,7 +729,8 @@ def delete_unary( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> compute.Operation: - r"""Deletes a specified zone VM extension policy. + r"""Deletes a specified zone VM extension policy within a + project. .. code-block:: python @@ -864,7 +865,8 @@ def delete( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> extended_operation.ExtendedOperation: - r"""Deletes a specified zone VM extension policy. + r"""Deletes a specified zone VM extension policy within a + project. .. code-block:: python @@ -1023,7 +1025,7 @@ def get( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> compute.VmExtensionPolicy: r"""Retrieves details of a specific zone VM extension - policy. + policy within a project. .. code-block:: python @@ -1583,7 +1585,8 @@ def update_unary( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> compute.Operation: - r"""Modifies an existing zone VM extension policy. + r"""Modifies an existing zone VM extension policy within + a project. .. code-block:: python @@ -1731,7 +1734,8 @@ def update( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> extended_operation.ExtendedOperation: - r"""Modifies an existing zone VM extension policy. + r"""Modifies an existing zone VM extension policy within + a project. .. 
code-block:: python diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/__init__.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/__init__.py index d057050eb844..6b24984e3bd5 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/__init__.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/__init__.py @@ -230,6 +230,27 @@ CancelRegionInstanceGroupManagerResizeRequestRequest, CancelRequestRemovePeeringNetworkRequest, CancelRolloutRequest, + CapacityAdviceRequest, + CapacityAdviceRequestDistributionPolicy, + CapacityAdviceRequestDistributionPolicyZoneConfiguration, + CapacityAdviceRequestInstanceFlexibilityPolicy, + CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection, + CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk, + CapacityAdviceRequestInstanceProperties, + CapacityAdviceRequestInstancePropertiesScheduling, + CapacityAdviceResponse, + CapacityAdviceResponseRecommendation, + CapacityAdviceResponseRecommendationScores, + CapacityAdviceResponseRecommendationShard, + CapacityAdviceRpcRequest, + CapacityHistoryAdviceRequest, + CapacityHistoryRequest, + CapacityHistoryRequestInstanceProperties, + CapacityHistoryRequestInstancePropertiesScheduling, + CapacityHistoryRequestLocationPolicy, + CapacityHistoryResponse, + CapacityHistoryResponsePreemptionRecord, + CapacityHistoryResponsePriceRecord, CircuitBreakers, CloneRulesFirewallPolicyRequest, CloneRulesNetworkFirewallPolicyRequest, @@ -450,6 +471,7 @@ FlexibleTimeRange, ForwardingRule, ForwardingRuleAggregatedList, + ForwardingRuleAttachedExtension, ForwardingRuleList, ForwardingRuleReference, ForwardingRuleServiceDirectoryRegistration, @@ -527,6 +549,7 @@ GetIamPolicyInstantSnapshotRequest, GetIamPolicyInterconnectAttachmentGroupRequest, GetIamPolicyInterconnectGroupRequest, + GetIamPolicyLicenseCodeRequest, GetIamPolicyLicenseRequest, 
GetIamPolicyMachineImageRequest, GetIamPolicyNetworkAttachmentRequest, @@ -1041,6 +1064,7 @@ InterconnectRemoteLocationPermittedConnections, InterconnectsGetDiagnosticsResponse, InterconnectsGetMacsecConfigResponse, + Interval, InvalidateCacheRegionUrlMapRequest, InvalidateCacheUrlMapRequest, Items, @@ -1221,10 +1245,12 @@ ManagedInstanceLastAttemptErrors, ManagedInstancePropertiesFromFlexibilityPolicy, ManagedInstanceScheduling, + ManagedInstanceShutdownDetails, ManagedInstanceVersion, Metadata, MetadataFilter, MetadataFilterLabelMatch, + Money, MoveAddressRequest, MoveDiskProjectRequest, MoveFirewallPolicyRequest, @@ -1741,6 +1767,7 @@ SetIamPolicyInstantSnapshotRequest, SetIamPolicyInterconnectAttachmentGroupRequest, SetIamPolicyInterconnectGroupRequest, + SetIamPolicyLicenseCodeRequest, SetIamPolicyLicenseRequest, SetIamPolicyMachineImageRequest, SetIamPolicyNetworkAttachmentRequest, @@ -2385,6 +2412,27 @@ "CancelRegionInstanceGroupManagerResizeRequestRequest", "CancelRequestRemovePeeringNetworkRequest", "CancelRolloutRequest", + "CapacityAdviceRequest", + "CapacityAdviceRequestDistributionPolicy", + "CapacityAdviceRequestDistributionPolicyZoneConfiguration", + "CapacityAdviceRequestInstanceFlexibilityPolicy", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk", + "CapacityAdviceRequestInstanceProperties", + "CapacityAdviceRequestInstancePropertiesScheduling", + "CapacityAdviceResponse", + "CapacityAdviceResponseRecommendation", + "CapacityAdviceResponseRecommendationScores", + "CapacityAdviceResponseRecommendationShard", + "CapacityAdviceRpcRequest", + "CapacityHistoryAdviceRequest", + "CapacityHistoryRequest", + "CapacityHistoryRequestInstanceProperties", + "CapacityHistoryRequestInstancePropertiesScheduling", + "CapacityHistoryRequestLocationPolicy", + "CapacityHistoryResponse", + "CapacityHistoryResponsePreemptionRecord", + "CapacityHistoryResponsePriceRecord", 
"CircuitBreakers", "CloneRulesFirewallPolicyRequest", "CloneRulesNetworkFirewallPolicyRequest", @@ -2605,6 +2653,7 @@ "FlexibleTimeRange", "ForwardingRule", "ForwardingRuleAggregatedList", + "ForwardingRuleAttachedExtension", "ForwardingRuleList", "ForwardingRuleReference", "ForwardingRuleServiceDirectoryRegistration", @@ -2682,6 +2731,7 @@ "GetIamPolicyInstantSnapshotRequest", "GetIamPolicyInterconnectAttachmentGroupRequest", "GetIamPolicyInterconnectGroupRequest", + "GetIamPolicyLicenseCodeRequest", "GetIamPolicyLicenseRequest", "GetIamPolicyMachineImageRequest", "GetIamPolicyNetworkAttachmentRequest", @@ -3196,6 +3246,7 @@ "InterconnectRemoteLocationPermittedConnections", "InterconnectsGetDiagnosticsResponse", "InterconnectsGetMacsecConfigResponse", + "Interval", "InvalidateCacheRegionUrlMapRequest", "InvalidateCacheUrlMapRequest", "Items", @@ -3376,10 +3427,12 @@ "ManagedInstanceLastAttemptErrors", "ManagedInstancePropertiesFromFlexibilityPolicy", "ManagedInstanceScheduling", + "ManagedInstanceShutdownDetails", "ManagedInstanceVersion", "Metadata", "MetadataFilter", "MetadataFilterLabelMatch", + "Money", "MoveAddressRequest", "MoveDiskProjectRequest", "MoveFirewallPolicyRequest", @@ -3896,6 +3949,7 @@ "SetIamPolicyInstantSnapshotRequest", "SetIamPolicyInterconnectAttachmentGroupRequest", "SetIamPolicyInterconnectGroupRequest", + "SetIamPolicyLicenseCodeRequest", "SetIamPolicyLicenseRequest", "SetIamPolicyMachineImageRequest", "SetIamPolicyNetworkAttachmentRequest", diff --git a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/compute.py b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/compute.py index f4c58dd757cb..2842bb4a6e27 100644 --- a/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/compute.py +++ b/packages/google-cloud-compute-v1beta/google/cloud/compute_v1beta/types/compute.py @@ -240,6 +240,27 @@ "CancelRegionInstanceGroupManagerResizeRequestRequest", 
"CancelRequestRemovePeeringNetworkRequest", "CancelRolloutRequest", + "CapacityAdviceRequest", + "CapacityAdviceRequestDistributionPolicy", + "CapacityAdviceRequestDistributionPolicyZoneConfiguration", + "CapacityAdviceRequestInstanceFlexibilityPolicy", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection", + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk", + "CapacityAdviceRequestInstanceProperties", + "CapacityAdviceRequestInstancePropertiesScheduling", + "CapacityAdviceResponse", + "CapacityAdviceResponseRecommendation", + "CapacityAdviceResponseRecommendationScores", + "CapacityAdviceResponseRecommendationShard", + "CapacityAdviceRpcRequest", + "CapacityHistoryAdviceRequest", + "CapacityHistoryRequest", + "CapacityHistoryRequestInstanceProperties", + "CapacityHistoryRequestInstancePropertiesScheduling", + "CapacityHistoryRequestLocationPolicy", + "CapacityHistoryResponse", + "CapacityHistoryResponsePreemptionRecord", + "CapacityHistoryResponsePriceRecord", "CircuitBreakers", "CloneRulesFirewallPolicyRequest", "CloneRulesNetworkFirewallPolicyRequest", @@ -460,6 +481,7 @@ "FlexibleTimeRange", "ForwardingRule", "ForwardingRuleAggregatedList", + "ForwardingRuleAttachedExtension", "ForwardingRuleList", "ForwardingRuleReference", "ForwardingRuleServiceDirectoryRegistration", @@ -539,6 +561,7 @@ "GetIamPolicyInstantSnapshotRequest", "GetIamPolicyInterconnectAttachmentGroupRequest", "GetIamPolicyInterconnectGroupRequest", + "GetIamPolicyLicenseCodeRequest", "GetIamPolicyLicenseRequest", "GetIamPolicyMachineImageRequest", "GetIamPolicyNetworkAttachmentRequest", @@ -1051,6 +1074,7 @@ "InterconnectRemoteLocationPermittedConnections", "InterconnectsGetDiagnosticsResponse", "InterconnectsGetMacsecConfigResponse", + "Interval", "InvalidateCacheRegionUrlMapRequest", "InvalidateCacheUrlMapRequest", "Items", @@ -1231,10 +1255,12 @@ "ManagedInstanceLastAttemptErrors", "ManagedInstancePropertiesFromFlexibilityPolicy", 
"ManagedInstanceScheduling", + "ManagedInstanceShutdownDetails", "ManagedInstanceVersion", "Metadata", "MetadataFilter", "MetadataFilterLabelMatch", + "Money", "MoveAddressRequest", "MoveDiskProjectRequest", "MoveFirewallPolicyRequest", @@ -1752,6 +1778,7 @@ "SetIamPolicyInstantSnapshotRequest", "SetIamPolicyInterconnectAttachmentGroupRequest", "SetIamPolicyInterconnectGroupRequest", + "SetIamPolicyLicenseCodeRequest", "SetIamPolicyLicenseRequest", "SetIamPolicyMachineImageRequest", "SetIamPolicyNetworkAttachmentRequest", @@ -5104,6 +5131,16 @@ class Purpose(proto.Enum): NAT_AUTO (163666477): External IP automatically reserved for Cloud NAT. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 (119932186): + The global external address can only be + assigned to Global External Passthrough Network + Load Balancer forwarding rules, as an + Availability Group 0 address. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 (119932187): + The global external address can only be + assigned to Global External Passthrough Network + Load Balancer forwarding rules, as an + Availability Group 1 address. PRIVATE_SERVICE_CONNECT (48134724): A private network IP address that can be used to configure Private Service Connect. This @@ -5125,6 +5162,8 @@ class Purpose(proto.Enum): GCE_ENDPOINT = 230515243 IPSEC_INTERCONNECT = 340437251 NAT_AUTO = 163666477 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 = 119932186 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 = 119932187 PRIVATE_SERVICE_CONNECT = 48134724 SERVERLESS = 270492508 SHARED_LOADBALANCER_VIP = 294447572 @@ -17608,7 +17647,7 @@ class AttachedDiskInitializeParams(proto.Message): Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. 
The field is ignored (both PUT & PATCH) when empty. @@ -18813,6 +18852,18 @@ class AutoscalingPolicy(proto.Message): During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. + stabilization_period_sec (int): + The number of seconds that autoscaler waits for load + stabilization before making scale-in decisions. This is + referred to as the `stabilization + period `__. + This might appear as a delay in scaling in but it is an + important mechanism for your application to not have + fluctuating size due to short term load fluctuations. + + The default stabilization period is 600 seconds. + + This field is a member of `oneof`_ ``_stabilization_period_sec``. """ class Mode(proto.Enum): @@ -18916,6 +18967,11 @@ class Mode(proto.Enum): message="AutoscalingPolicyScalingSchedule", ) ) + stabilization_period_sec: int = proto.Field( + proto.INT32, + number=420200243, + optional=True, + ) class AutoscalingPolicyCpuUtilization(proto.Message): @@ -21486,6 +21542,9 @@ class LoadBalancingScheme(proto.Enum): external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers. + EXTERNAL_PASSTHROUGH (216895232): + Signifies that this will be used for global + external passthrough Network Load Balancers. INTERNAL (279295677): Signifies that this will be used for internal passthrough Network Load Balancers. @@ -21502,6 +21561,7 @@ class LoadBalancingScheme(proto.Enum): UNDEFINED_LOAD_BALANCING_SCHEME = 0 EXTERNAL = 35607499 EXTERNAL_MANAGED = 512006923 + EXTERNAL_PASSTHROUGH = 216895232 INTERNAL = 279295677 INTERNAL_MANAGED = 37350397 INTERNAL_SELF_MANAGED = 236211150 @@ -26558,6 +26618,805 @@ class CancelRolloutRequest(proto.Message): ) +class CapacityAdviceRequest(proto.Message): + r"""A request to provide Assistant Scores. These scores determine + VM obtainability and preemption likelihood. + + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + distribution_policy (google.cloud.compute_v1beta.types.CapacityAdviceRequestDistributionPolicy): + Policy specifying the distribution of + instances across zones within the requested + region. + + This field is a member of `oneof`_ ``_distribution_policy``. + instance_flexibility_policy (google.cloud.compute_v1beta.types.CapacityAdviceRequestInstanceFlexibilityPolicy): + Policy for instance selectors. + + This field is a member of `oneof`_ ``_instance_flexibility_policy``. + instance_properties (google.cloud.compute_v1beta.types.CapacityAdviceRequestInstanceProperties): + Instance properties for this request. + + This field is a member of `oneof`_ ``_instance_properties``. + size (int): + The number of VM instances to request. + + This field is a member of `oneof`_ ``_size``. + """ + + distribution_policy: "CapacityAdviceRequestDistributionPolicy" = proto.Field( + proto.MESSAGE, + number=534558541, + optional=True, + message="CapacityAdviceRequestDistributionPolicy", + ) + instance_flexibility_policy: "CapacityAdviceRequestInstanceFlexibilityPolicy" = ( + proto.Field( + proto.MESSAGE, + number=26937090, + optional=True, + message="CapacityAdviceRequestInstanceFlexibilityPolicy", + ) + ) + instance_properties: "CapacityAdviceRequestInstanceProperties" = proto.Field( + proto.MESSAGE, + number=215355165, + optional=True, + message="CapacityAdviceRequestInstanceProperties", + ) + size: int = proto.Field( + proto.INT32, + number=3530753, + optional=True, + ) + + +class CapacityAdviceRequestDistributionPolicy(proto.Message): + r"""Distribution policy. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + target_shape (str): + Target distribution shape. You can specify the following + values:ANY, ANY_SINGLE_ZONE, or BALANCED. 
Check the + TargetShape enum for the list of possible values. + + This field is a member of `oneof`_ ``_target_shape``. + zones (MutableSequence[google.cloud.compute_v1beta.types.CapacityAdviceRequestDistributionPolicyZoneConfiguration]): + Zones where Capacity Advisor looks for + capacity. + """ + + class TargetShape(proto.Enum): + r"""Target distribution shape. You can specify the following values:ANY, + ANY_SINGLE_ZONE, or BALANCED. + + Values: + UNDEFINED_TARGET_SHAPE (0): + A value indicating that the enum field is not + set. + ANY (64972): + Picks zones for creating VM instances to + fulfill the requested number of VMs within + present resource constraints. + ANY_SINGLE_ZONE (61100880): + Creates all VM instances within a single + zone. The zone is selected based on the present + resource constraints. + BALANCED (468409608): + Prioritizes acquisition of resources, + scheduling VMs in zones where resources are + available while distributing VMs as evenly as + possible across selected zones to minimize the + impact of zonal failure. + TARGET_SHAPE_UNSPECIFIED (449316907): + No description available. + """ + + UNDEFINED_TARGET_SHAPE = 0 + ANY = 64972 + ANY_SINGLE_ZONE = 61100880 + BALANCED = 468409608 + TARGET_SHAPE_UNSPECIFIED = 449316907 + + target_shape: str = proto.Field( + proto.STRING, + number=338621299, + optional=True, + ) + zones: MutableSequence[ + "CapacityAdviceRequestDistributionPolicyZoneConfiguration" + ] = proto.RepeatedField( + proto.MESSAGE, + number=116085319, + message="CapacityAdviceRequestDistributionPolicyZoneConfiguration", + ) + + +class CapacityAdviceRequestDistributionPolicyZoneConfiguration(proto.Message): + r"""Zone configuration for the distribution policy. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + zone (str): + The URL of the zone. It can be a + partial or full URL. 
For example, the following + are valid values: + + + - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + - projects/project/zones/zone + - zones/zone + + This field is a member of `oneof`_ ``_zone``. + """ + + zone: str = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class CapacityAdviceRequestInstanceFlexibilityPolicy(proto.Message): + r"""Specification of alternative, flexible instance + configurations. + + Attributes: + instance_selections (MutableMapping[str, google.cloud.compute_v1beta.types.CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection]): + Named instance selections to configure + properties. The key is an arbitrary, unique + RFC1035 string that identifies the instance + selection. + """ + + instance_selections: MutableMapping[ + str, "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection" + ] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=22954577, + message="CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection", + ) + + +class CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelection(proto.Message): + r"""Machine specification. + + Attributes: + disks (MutableSequence[google.cloud.compute_v1beta.types.CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk]): + Local SSDs. + guest_accelerators (MutableSequence[google.cloud.compute_v1beta.types.AcceleratorConfig]): + Accelerators configuration. + machine_types (MutableSequence[str]): + Full machine-type names, e.g. + "n1-standard-16". 
+ """ + + disks: MutableSequence[ + "CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk" + ] = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message="CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk", + ) + guest_accelerators: MutableSequence["AcceleratorConfig"] = proto.RepeatedField( + proto.MESSAGE, + number=463595119, + message="AcceleratorConfig", + ) + machine_types: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=79720065, + ) + + +class CapacityAdviceRequestInstanceFlexibilityPolicyInstanceSelectionAttachedDisk( + proto.Message +): + r"""Attached disk configuration. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + type_ (str): + Specifies the type of the disk. + This field must be set to SCRATCH. + Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. + """ + + class Type(proto.Enum): + r"""Specifies the type of the disk. + This field must be set to SCRATCH. + + Values: + UNDEFINED_TYPE (0): + A value indicating that the enum field is not + set. + DISK_TYPE_UNSPECIFIED (333621236): + No description available. + SCRATCH (496778970): + No description available. + """ + + UNDEFINED_TYPE = 0 + DISK_TYPE_UNSPECIFIED = 333621236 + SCRATCH = 496778970 + + type_: str = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class CapacityAdviceRequestInstanceProperties(proto.Message): + r"""Instance provisioning properties. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + scheduling (google.cloud.compute_v1beta.types.CapacityAdviceRequestInstancePropertiesScheduling): + Specifies the scheduling options. + + This field is a member of `oneof`_ ``_scheduling``. 
+ """ + + scheduling: "CapacityAdviceRequestInstancePropertiesScheduling" = proto.Field( + proto.MESSAGE, + number=386688404, + optional=True, + message="CapacityAdviceRequestInstancePropertiesScheduling", + ) + + +class CapacityAdviceRequestInstancePropertiesScheduling(proto.Message): + r"""Defines the instance scheduling options. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + provisioning_model (str): + Specifies the provisioning model. + Check the ProvisioningModel enum for the list of + possible values. + + This field is a member of `oneof`_ ``_provisioning_model``. + """ + + class ProvisioningModel(proto.Enum): + r"""Specifies the provisioning model. + + Values: + UNDEFINED_PROVISIONING_MODEL (0): + A value indicating that the enum field is not + set. + FLEX_START (101746812): + Instance is provisioned using the Flex Start + provisioning model and has a limited runtime. + RESERVATION_BOUND (293538571): + Bound to the lifecycle of the reservation in + which it is provisioned. + SPOT (2552066): + Heavily discounted, no guaranteed runtime. + STANDARD (484642493): + Standard provisioning with user controlled + runtime, no discounts. + """ + + UNDEFINED_PROVISIONING_MODEL = 0 + FLEX_START = 101746812 + RESERVATION_BOUND = 293538571 + SPOT = 2552066 + STANDARD = 484642493 + + provisioning_model: str = proto.Field( + proto.STRING, + number=494423, + optional=True, + ) + + +class CapacityAdviceResponse(proto.Message): + r"""A response contains scoring recommendations. + + Attributes: + recommendations (MutableSequence[google.cloud.compute_v1beta.types.CapacityAdviceResponseRecommendation]): + Initially the API will provide one + recommendation which balances the individual + scores according to the service provider's + preference. 
+ """ + + recommendations: MutableSequence["CapacityAdviceResponseRecommendation"] = ( + proto.RepeatedField( + proto.MESSAGE, + number=324515802, + message="CapacityAdviceResponseRecommendation", + ) + ) + + +class CapacityAdviceResponseRecommendation(proto.Message): + r"""Recommendation. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + scores (google.cloud.compute_v1beta.types.CapacityAdviceResponseRecommendationScores): + Scores for the recommendation. + + This field is a member of `oneof`_ ``_scores``. + shards (MutableSequence[google.cloud.compute_v1beta.types.CapacityAdviceResponseRecommendationShard]): + Shards represent blocks of uniform capacity + in recommendations. + """ + + scores: "CapacityAdviceResponseRecommendationScores" = proto.Field( + proto.MESSAGE, + number=165975073, + optional=True, + message="CapacityAdviceResponseRecommendationScores", + ) + shards: MutableSequence["CapacityAdviceResponseRecommendationShard"] = ( + proto.RepeatedField( + proto.MESSAGE, + number=170175573, + message="CapacityAdviceResponseRecommendationShard", + ) + ) + + +class CapacityAdviceResponseRecommendationScores(proto.Message): + r"""Groups information about a shard of capacity. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + estimated_uptime (str): + The estimated run time of the majority of + Spot VMs in the request before preemption. The + estimate is best-effort only. It is based on + historical data and current conditions. + + This field is a member of `oneof`_ ``_estimated_uptime``. + obtainability (float): + The obtainability score indicates the + likelihood of successfully obtaining + (provisioning) the requested number of VMs. The + score range is 0.0 through 1.0. Higher is + better. + + This field is a member of `oneof`_ ``_obtainability``. 
+ """ + + estimated_uptime: str = proto.Field( + proto.STRING, + number=223976779, + optional=True, + ) + obtainability: float = proto.Field( + proto.DOUBLE, + number=260735205, + optional=True, + ) + + +class CapacityAdviceResponseRecommendationShard(proto.Message): + r"""Shards represent blocks of uniform capacity in + recommendations. Each shard is for a single zone and a single + machine shape. Each shard defines a size expressed as the number + of VMs. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_count (int): + The number of instances. + + This field is a member of `oneof`_ ``_instance_count``. + machine_type (str): + The machine type corresponds to the instance + selection in the request. + + This field is a member of `oneof`_ ``_machine_type``. + provisioning_model (str): + The provisioning model that you want to view + recommendations for. Check the ProvisioningModel + enum for the list of possible values. + + This field is a member of `oneof`_ ``_provisioning_model``. + zone (str): + Output only. The zone name for this shard. + + This field is a member of `oneof`_ ``_zone``. + """ + + class ProvisioningModel(proto.Enum): + r"""The provisioning model that you want to view recommendations + for. + + Values: + UNDEFINED_PROVISIONING_MODEL (0): + A value indicating that the enum field is not + set. + FLEX_START (101746812): + Instance is provisioned using the Flex Start + provisioning model and has a limited runtime. + RESERVATION_BOUND (293538571): + Bound to the lifecycle of the reservation in + which it is provisioned. + SPOT (2552066): + Heavily discounted, no guaranteed runtime. + STANDARD (484642493): + Standard provisioning with user controlled + runtime, no discounts. 
+ """ + + UNDEFINED_PROVISIONING_MODEL = 0 + FLEX_START = 101746812 + RESERVATION_BOUND = 293538571 + SPOT = 2552066 + STANDARD = 484642493 + + instance_count: int = proto.Field( + proto.INT32, + number=77317349, + optional=True, + ) + machine_type: str = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + provisioning_model: str = proto.Field( + proto.STRING, + number=494423, + optional=True, + ) + zone: str = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class CapacityAdviceRpcRequest(proto.Message): + r"""A request message for Advice.Capacity. See the method + description for details. + + Attributes: + capacity_advice_request_resource (google.cloud.compute_v1beta.types.CapacityAdviceRequest): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + capacity_advice_request_resource: "CapacityAdviceRequest" = proto.Field( + proto.MESSAGE, + number=176354208, + message="CapacityAdviceRequest", + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + region: str = proto.Field( + proto.STRING, + number=138946292, + ) + + +class CapacityHistoryAdviceRequest(proto.Message): + r"""A request message for Advice.CapacityHistory. See the method + description for details. + + Attributes: + capacity_history_request_resource (google.cloud.compute_v1beta.types.CapacityHistoryRequest): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + capacity_history_request_resource: "CapacityHistoryRequest" = proto.Field( + proto.MESSAGE, + number=182030318, + message="CapacityHistoryRequest", + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + region: str = proto.Field( + proto.STRING, + number=138946292, + ) + + +class CapacityHistoryRequest(proto.Message): + r"""A request to get the capacity history. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_properties (google.cloud.compute_v1beta.types.CapacityHistoryRequestInstanceProperties): + Instance properties for this request. + + This field is a member of `oneof`_ ``_instance_properties``. + location_policy (google.cloud.compute_v1beta.types.CapacityHistoryRequestLocationPolicy): + Location policy for this request. + + This field is a member of `oneof`_ ``_location_policy``. + types (MutableSequence[str]): + List of history types to get capacity history + for. Check the Types enum for the list of + possible values. + """ + + class Types(proto.Enum): + r""" + + Values: + UNDEFINED_TYPES (0): + A value indicating that the enum field is not + set. + HISTORY_TYPE_UNSPECIFIED (58549757): + No description available. + PREEMPTION (512869337): + Preemption history. + PRICE (76396841): + Price history. + """ + + UNDEFINED_TYPES = 0 + HISTORY_TYPE_UNSPECIFIED = 58549757 + PREEMPTION = 512869337 + PRICE = 76396841 + + instance_properties: "CapacityHistoryRequestInstanceProperties" = proto.Field( + proto.MESSAGE, + number=215355165, + optional=True, + message="CapacityHistoryRequestInstanceProperties", + ) + location_policy: "CapacityHistoryRequestLocationPolicy" = proto.Field( + proto.MESSAGE, + number=465689852, + optional=True, + message="CapacityHistoryRequestLocationPolicy", + ) + types: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=110844025, + ) + + +class CapacityHistoryRequestInstanceProperties(proto.Message): + r"""Instance properties for this request. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + machine_type (str): + The machine type for the VM, such as ``n2-standard-4``. + + This field is a member of `oneof`_ ``_machine_type``. 
+ scheduling (google.cloud.compute_v1beta.types.CapacityHistoryRequestInstancePropertiesScheduling): + Specifies the scheduling options. + + This field is a member of `oneof`_ ``_scheduling``. + """ + + machine_type: str = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + scheduling: "CapacityHistoryRequestInstancePropertiesScheduling" = proto.Field( + proto.MESSAGE, + number=386688404, + optional=True, + message="CapacityHistoryRequestInstancePropertiesScheduling", + ) + + +class CapacityHistoryRequestInstancePropertiesScheduling(proto.Message): + r"""Scheduling options. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + provisioning_model (str): + The provisioning model to get capacity + history for. This field must be set to SPOT. + + For more information, see + Compute Engine instances provisioning models. + Check the ProvisioningModel enum for the list of + possible values. + + This field is a member of `oneof`_ ``_provisioning_model``. + """ + + class ProvisioningModel(proto.Enum): + r"""The provisioning model to get capacity history for. + This field must be set to SPOT. + + For more information, see + Compute Engine instances provisioning models. + + Values: + UNDEFINED_PROVISIONING_MODEL (0): + A value indicating that the enum field is not + set. + FLEX_START (101746812): + Instance is provisioned using the Flex Start + provisioning model and has a limited runtime. + RESERVATION_BOUND (293538571): + Bound to the lifecycle of the reservation in + which it is provisioned. + SPOT (2552066): + Heavily discounted, no guaranteed runtime. + STANDARD (484642493): + Standard provisioning with user controlled + runtime, no discounts. 
+ """ + + UNDEFINED_PROVISIONING_MODEL = 0 + FLEX_START = 101746812 + RESERVATION_BOUND = 293538571 + SPOT = 2552066 + STANDARD = 484642493 + + provisioning_model: str = proto.Field( + proto.STRING, + number=494423, + optional=True, + ) + + +class CapacityHistoryRequestLocationPolicy(proto.Message): + r"""Location policy for this request. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + location (str): + The region or zone to get capacity history + for. + It can be a partial or full URL. For example, + the following are valid values: + + + - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + - projects/project/zones/zone + - zones/zone + + This field is optional. + + This field is a member of `oneof`_ ``_location``. + """ + + location: str = proto.Field( + proto.STRING, + number=290430901, + optional=True, + ) + + +class CapacityHistoryResponse(proto.Message): + r"""Contains the capacity history. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + location (str): + + This field is a member of `oneof`_ ``_location``. + machine_type (str): + + This field is a member of `oneof`_ ``_machine_type``. 
+ preemption_history (MutableSequence[google.cloud.compute_v1beta.types.CapacityHistoryResponsePreemptionRecord]): + + price_history (MutableSequence[google.cloud.compute_v1beta.types.CapacityHistoryResponsePriceRecord]): + + """ + + location: str = proto.Field( + proto.STRING, + number=290430901, + optional=True, + ) + machine_type: str = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + preemption_history: MutableSequence["CapacityHistoryResponsePreemptionRecord"] = ( + proto.RepeatedField( + proto.MESSAGE, + number=364018222, + message="CapacityHistoryResponsePreemptionRecord", + ) + ) + price_history: MutableSequence["CapacityHistoryResponsePriceRecord"] = ( + proto.RepeatedField( + proto.MESSAGE, + number=326230942, + message="CapacityHistoryResponsePriceRecord", + ) + ) + + +class CapacityHistoryResponsePreemptionRecord(proto.Message): + r""" + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + interval (google.cloud.compute_v1beta.types.Interval): + + This field is a member of `oneof`_ ``_interval``. + preemption_rate (float): + + This field is a member of `oneof`_ ``_preemption_rate``. + """ + + interval: "Interval" = proto.Field( + proto.MESSAGE, + number=33547461, + optional=True, + message="Interval", + ) + preemption_rate: float = proto.Field( + proto.DOUBLE, + number=140651910, + optional=True, + ) + + +class CapacityHistoryResponsePriceRecord(proto.Message): + r""" + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + interval (google.cloud.compute_v1beta.types.Interval): + + This field is a member of `oneof`_ ``_interval``. + list_price (google.cloud.compute_v1beta.types.Money): + + This field is a member of `oneof`_ ``_list_price``. 
+ """ + + interval: "Interval" = proto.Field( + proto.MESSAGE, + number=33547461, + optional=True, + message="Interval", + ) + list_price: "Money" = proto.Field( + proto.MESSAGE, + number=167990888, + optional=True, + message="Money", + ) + + class CircuitBreakers(proto.Message): r"""Settings controlling the volume of requests, connections and retries to this backend service. @@ -27027,7 +27886,7 @@ class Commitment(proto.Message): GENERAL_PURPOSE_E2,GENERAL_PURPOSE_N2, GENERAL_PURPOSE_N2D,GENERAL_PURPOSE_N4, GENERAL_PURPOSE_T2D,GRAPHICS_OPTIMIZED, - GRAPHICS_OPTIMIZED_G4,MEMORY_OPTIMIZED, + GRAPHICS_OPTIMIZED_G4,GRAPHICS_OPTIMIZED_G4_VGPU,MEMORY_OPTIMIZED, MEMORY_OPTIMIZED_M3,MEMORY_OPTIMIZED_X4, STORAGE_OPTIMIZED_Z3. For example, type MEMORY_OPTIMIZED specifies a commitment that applies only to eligible @@ -27137,7 +27996,7 @@ class Type(proto.Enum): GENERAL_PURPOSE_E2,GENERAL_PURPOSE_N2, GENERAL_PURPOSE_N2D,GENERAL_PURPOSE_N4, GENERAL_PURPOSE_T2D,GRAPHICS_OPTIMIZED, - GRAPHICS_OPTIMIZED_G4,MEMORY_OPTIMIZED, + GRAPHICS_OPTIMIZED_G4,GRAPHICS_OPTIMIZED_G4_VGPU,MEMORY_OPTIMIZED, MEMORY_OPTIMIZED_M3,MEMORY_OPTIMIZED_X4, STORAGE_OPTIMIZED_Z3. For example, type MEMORY_OPTIMIZED specifies a commitment that applies only to eligible resources of memory optimized M1 and M2 machine @@ -27196,6 +28055,8 @@ class Type(proto.Enum): No description available. GRAPHICS_OPTIMIZED_G4 (54029369): No description available. + GRAPHICS_OPTIMIZED_G4_VGPU (298988732): + No description available. MEMORY_OPTIMIZED (281753417): No description available. 
MEMORY_OPTIMIZED_M3 (276301372): @@ -27262,6 +28123,7 @@ class Type(proto.Enum): GENERAL_PURPOSE_T2D = 232477166 GRAPHICS_OPTIMIZED = 68500563 GRAPHICS_OPTIMIZED_G4 = 54029369 + GRAPHICS_OPTIMIZED_G4_VGPU = 298988732 MEMORY_OPTIMIZED = 281753417 MEMORY_OPTIMIZED_M3 = 276301372 MEMORY_OPTIMIZED_M4 = 276301373 @@ -30412,6 +31274,12 @@ class DeleteInstanceGroupManagerRequest(proto.Message): instance_group_manager (str): The name of the managed instance group to delete. + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance deletion even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. request_id (str): @@ -30445,6 +31313,11 @@ class DeleteInstanceGroupManagerRequest(proto.Message): proto.STRING, number=249363395, ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -30717,8 +31590,93 @@ class DeleteInstancesInstanceGroupManagerRequest(proto.Message): The name of the managed instance group. instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1beta.types.InstanceGroupManagersDeleteInstancesRequest): The body resource for this request + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance deletion even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. + + For example, consider a situation where you make + an initial request and the request times out. 
If + you make the request again with the same request + ID, the server can check if original operation + with the same request ID was received, and if + so, will ignore the second request. This + prevents clients from accidentally creating + duplicate commitments. + + The request ID must be + a valid UUID with the exception that zero UUID + is not supported + (00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of thezone where the managed + instance group is located. + """ + + instance_group_manager: str = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_delete_instances_request_resource: "InstanceGroupManagersDeleteInstancesRequest" = proto.Field( + proto.MESSAGE, + number=166421252, + message="InstanceGroupManagersDeleteInstancesRequest", + ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + request_id: str = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone: str = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteInstancesRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.DeleteInstances. See the method + description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_group_manager (str): + Name of the managed instance group. + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance deletion even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. + region (str): + Name of the region scoping this request. 
+ region_instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1beta.types.RegionInstanceGroupManagersDeleteInstancesRequest): + The body resource for this request request_id (str): An optional request ID to identify requests. Specify a unique request ID so that if you must @@ -30741,80 +31699,17 @@ class DeleteInstancesInstanceGroupManagerRequest(proto.Message): (00000000-0000-0000-0000-000000000000). This field is a member of `oneof`_ ``_request_id``. - zone (str): - The name of thezone where the managed - instance group is located. """ instance_group_manager: str = proto.Field( proto.STRING, number=249363395, ) - instance_group_managers_delete_instances_request_resource: "InstanceGroupManagersDeleteInstancesRequest" = proto.Field( - proto.MESSAGE, - number=166421252, - message="InstanceGroupManagersDeleteInstancesRequest", - ) - project: str = proto.Field( - proto.STRING, - number=227560217, - ) - request_id: str = proto.Field( - proto.STRING, - number=37109963, + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, optional=True, ) - zone: str = proto.Field( - proto.STRING, - number=3744684, - ) - - -class DeleteInstancesRegionInstanceGroupManagerRequest(proto.Message): - r"""A request message for - RegionInstanceGroupManagers.DeleteInstances. See the method - description for details. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - instance_group_manager (str): - Name of the managed instance group. - project (str): - Project ID for this request. - region (str): - Name of the region scoping this request. - region_instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1beta.types.RegionInstanceGroupManagersDeleteInstancesRequest): - The body resource for this request - request_id (str): - An optional request ID to identify requests. 
- Specify a unique request ID so that if you must - retry your request, the server will know to - ignore the request if it has already been - completed. - - For example, consider a situation where you make - an initial request and the request times out. If - you make the request again with the same request - ID, the server can check if original operation - with the same request ID was received, and if - so, will ignore the second request. This - prevents clients from accidentally creating - duplicate commitments. - - The request ID must be - a valid UUID with the exception that zero UUID - is not supported - (00000000-0000-0000-0000-000000000000). - - This field is a member of `oneof`_ ``_request_id``. - """ - - instance_group_manager: str = proto.Field( - proto.STRING, - number=249363395, - ) project: str = proto.Field( proto.STRING, number=227560217, @@ -32677,6 +33572,12 @@ class DeleteRegionInstanceGroupManagerRequest(proto.Message): Attributes: instance_group_manager (str): Name of the managed instance group to delete. + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance deletion even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. region (str): @@ -32709,6 +33610,11 @@ class DeleteRegionInstanceGroupManagerRequest(proto.Message): proto.STRING, number=249363395, ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -37382,7 +38288,7 @@ class DiskParams(proto.Message): Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. 
The field is ignored (both PUT & PATCH) when empty. @@ -39914,8 +40820,8 @@ class FirewallPolicy(proto.Message): This field is a member of `oneof`_ ``_policy_source``. policy_type (str): - The type of the firewall policy. This field can be - eitherVPC_POLICY or RDMA_ROCE_POLICY. + The type of the firewall policy. This field can be one of + VPC_POLICY, RDMA_ROCE_POLICY or ULL_POLICY. Note: if not specified then VPC_POLICY will be used. Check the PolicyType enum for the list of possible values. @@ -39993,8 +40899,8 @@ class PolicySource(proto.Enum): USER_DEFINED = 491485557 class PolicyType(proto.Enum): - r"""The type of the firewall policy. This field can be eitherVPC_POLICY - or RDMA_ROCE_POLICY. + r"""The type of the firewall policy. This field can be one of + VPC_POLICY, RDMA_ROCE_POLICY or ULL_POLICY. Note: if not specified then VPC_POLICY will be used. @@ -40948,8 +41854,8 @@ class FixedOrPercent(proto.Message): Attributes: calculated (int): - Output only. [Output Only] Absolute value of VM instances - calculated based on the specific mode. + Output only. Absolute value of VM instances calculated based + on the specific mode. :: @@ -41122,6 +42028,8 @@ class ForwardingRule(proto.Message): address number. This field is a member of `oneof`_ ``_I_p_address``. + I_p_addresses (MutableSequence[str]): + I_p_protocol (str): The IP protocol to which this rule applies. @@ -41175,6 +42083,18 @@ class ForwardingRule(proto.Message): allow_psc_packet_injection (bool): This field is a member of `oneof`_ ``_allow_psc_packet_injection``. + attached_extensions (MutableSequence[google.cloud.compute_v1beta.types.ForwardingRuleAttachedExtension]): + Output only. [Output Only]. The extensions that are attached + to this ForwardingRule. + availability_group (str): + [Output Only] Specifies the availability group of the + forwarding rule. 
This field is for use by global external + passthrough load balancers (load balancing scheme + EXTERNAL_PASSTHROUGH) and is set for the child forwarding + rules only. Check the AvailabilityGroup enum for the list of + possible values. + + This field is a member of `oneof`_ ``_availability_group``. backend_service (str): Identifies the backend service to which the forwarding rule sends traffic. Required for @@ -41192,6 +42112,14 @@ class ForwardingRule(proto.Message): forwarding rule does not have sourceIPRanges specified. This field is a member of `oneof`_ ``_base_forwarding_rule``. + child_forwarding_rules (MutableSequence[str]): + Output only. [Output Only] Applicable only to the parent + forwarding rule of global external passthrough load + balancers. This field contains the list of child forwarding + rule URLs associated with the parent forwarding rule: one + for each availability group. AVAILABILITY_GROUP0 will be the + first element, and AVAILABILITY_GROUP1 will be the second + element. creation_timestamp (str): Output only. [Output Only] Creation timestamp inRFC3339 text format. @@ -41407,6 +42335,13 @@ class ForwardingRule(proto.Message): set, this field is not mutable. This field is a member of `oneof`_ ``_no_automate_dns_zone``. + parent_forwarding_rule (str): + Output only. [Output Only] Applicable only to the child + forwarding rules of global external passthrough load + balancers. This field contains the URL of the parent + forwarding rule. + + This field is a member of `oneof`_ ``_parent_forwarding_rule``. port_range (str): The ports, portRange, and allPorts fields are mutually exclusive. Only packets addressed to ports in the specified @@ -41566,6 +42501,29 @@ class ForwardingRule(proto.Message): This field is a member of `oneof`_ ``_target``. """ + class AvailabilityGroup(proto.Enum): + r"""[Output Only] Specifies the availability group of the forwarding + rule. 
This field is for use by global external passthrough load + balancers (load balancing scheme EXTERNAL_PASSTHROUGH) and is set + for the child forwarding rules only. + + Values: + UNDEFINED_AVAILABILITY_GROUP (0): + A value indicating that the enum field is not + set. + AVAILABILITY_GROUP0 (79044885): + No description available. + AVAILABILITY_GROUP1 (79044886): + No description available. + AVAILABILITY_GROUP_UNSPECIFIED (510741331): + No description available. + """ + + UNDEFINED_AVAILABILITY_GROUP = 0 + AVAILABILITY_GROUP0 = 79044885 + AVAILABILITY_GROUP1 = 79044886 + AVAILABILITY_GROUP_UNSPECIFIED = 510741331 + class ExternalManagedBackendBucketMigrationState(proto.Enum): r"""Specifies the canary migration state for the backend buckets attached to this forwarding rule. Possible values are PREPARE, @@ -41677,6 +42635,8 @@ class LoadBalancingScheme(proto.Enum): No description available. EXTERNAL_MANAGED (512006923): No description available. + EXTERNAL_PASSTHROUGH (216895232): + No description available. INTERNAL (279295677): No description available. 
INTERNAL_MANAGED (37350397): @@ -41690,6 +42650,7 @@ class LoadBalancingScheme(proto.Enum): UNDEFINED_LOAD_BALANCING_SCHEME = 0 EXTERNAL = 35607499 EXTERNAL_MANAGED = 512006923 + EXTERNAL_PASSTHROUGH = 216895232 INTERNAL = 279295677 INTERNAL_MANAGED = 37350397 INTERNAL_SELF_MANAGED = 236211150 @@ -41773,6 +42734,10 @@ class PscConnectionStatus(proto.Enum): number=42976943, optional=True, ) + I_p_addresses: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=498656157, + ) I_p_protocol: str = proto.Field( proto.STRING, number=488094525, @@ -41798,6 +42763,18 @@ class PscConnectionStatus(proto.Enum): number=272272565, optional=True, ) + attached_extensions: MutableSequence["ForwardingRuleAttachedExtension"] = ( + proto.RepeatedField( + proto.MESSAGE, + number=385226127, + message="ForwardingRuleAttachedExtension", + ) + ) + availability_group: str = proto.Field( + proto.STRING, + number=62963355, + optional=True, + ) backend_service: str = proto.Field( proto.STRING, number=306946058, @@ -41808,6 +42785,10 @@ class PscConnectionStatus(proto.Enum): number=524873104, optional=True, ) + child_forwarding_rules: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=334878200, + ) creation_timestamp: str = proto.Field( proto.STRING, number=30525366, @@ -41898,6 +42879,11 @@ class PscConnectionStatus(proto.Enum): number=64546991, optional=True, ) + parent_forwarding_rule: str = proto.Field( + proto.STRING, + number=329325929, + optional=True, + ) port_range: str = proto.Field( proto.STRING, number=217518079, @@ -42049,6 +43035,27 @@ def raw_page(self): ) +class ForwardingRuleAttachedExtension(proto.Message): + r"""Reference to an extension resource that is attached to this + ForwardingRule. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + reference (str): + Output only. The resource name. + + This field is a member of `oneof`_ ``_reference``. 
+ """ + + reference: str = proto.Field( + proto.STRING, + number=148586315, + optional=True, + ) + + class ForwardingRuleList(proto.Message): r"""Contains a list of ForwardingRule resources. @@ -44945,6 +45952,9 @@ class GetForwardingRuleRequest(proto.Message): r"""A request message for ForwardingRules.Get. See the method description for details. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: forwarding_rule (str): Name of the ForwardingRule resource to @@ -44953,8 +45963,32 @@ class GetForwardingRuleRequest(proto.Message): Project ID for this request. region (str): Name of the region scoping this request. + view (str): + Check the View enum for the list of possible + values. + + This field is a member of `oneof`_ ``_view``. """ + class View(proto.Enum): + r""" + + Values: + UNDEFINED_VIEW (0): + A value indicating that the enum field is not + set. + BASIC (62970894): + The default view of a ForwardingRule, which + includes the basic fields. + FULL (2169487): + The full view, including the + ForwardingRule.\ ``attached_extensions`` field. + """ + + UNDEFINED_VIEW = 0 + BASIC = 62970894 + FULL = 2169487 + forwarding_rule: str = proto.Field( proto.STRING, number=269964030, @@ -44967,6 +46001,11 @@ class GetForwardingRuleRequest(proto.Message): proto.STRING, number=138946292, ) + view: str = proto.Field( + proto.STRING, + number=3619493, + optional=True, + ) class GetFromFamilyImageRequest(proto.Message): @@ -45046,14 +46085,41 @@ class GetGlobalForwardingRuleRequest(proto.Message): r"""A request message for GlobalForwardingRules.Get. See the method description for details. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: forwarding_rule (str): Name of the ForwardingRule resource to return. project (str): Project ID for this request. + view (str): + Check the View enum for the list of possible + values. 
+ + This field is a member of `oneof`_ ``_view``. """ + class View(proto.Enum): + r""" + + Values: + UNDEFINED_VIEW (0): + A value indicating that the enum field is not + set. + BASIC (62970894): + The default view of a ForwardingRule, which + includes the basic fields. + FULL (2169487): + The full view, including the + ForwardingRule.\ ``attached_extensions`` field. + """ + + UNDEFINED_VIEW = 0 + BASIC = 62970894 + FULL = 2169487 + forwarding_rule: str = proto.Field( proto.STRING, number=269964030, @@ -45062,6 +46128,11 @@ class GetGlobalForwardingRuleRequest(proto.Message): proto.STRING, number=227560217, ) + view: str = proto.Field( + proto.STRING, + number=3619493, + optional=True, + ) class GetGlobalNetworkEndpointGroupRequest(proto.Message): @@ -45644,8 +46715,120 @@ class GetIamPolicyInstanceTemplateRequest(proto.Message): ) -class GetIamPolicyInstantSnapshotGroupRequest(proto.Message): - r"""A request message for InstantSnapshotGroups.GetIamPolicy. See +class GetIamPolicyInstantSnapshotGroupRequest(proto.Message): + r"""A request message for InstantSnapshotGroups.GetIamPolicy. See + the method description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. 
+ """ + + options_requested_policy_version: int = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + resource: str = proto.Field( + proto.STRING, + number=195806222, + ) + zone: str = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyInstantSnapshotRequest(proto.Message): + r"""A request message for InstantSnapshots.GetIamPolicy. See the + method description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + """ + + options_requested_policy_version: int = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + resource: str = proto.Field( + proto.STRING, + number=195806222, + ) + zone: str = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyInterconnectAttachmentGroupRequest(proto.Message): + r"""A request message for + InterconnectAttachmentGroups.GetIamPolicy. See the method + description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. 
+ """ + + options_requested_policy_version: int = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + resource: str = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyInterconnectGroupRequest(proto.Message): + r"""A request message for InterconnectGroups.GetIamPolicy. See the method description for details. @@ -45660,8 +46843,6 @@ class GetIamPolicyInstantSnapshotGroupRequest(proto.Message): Project ID for this request. resource (str): Name or id of the resource for this request. - zone (str): - The name of the zone for this request. """ options_requested_policy_version: int = proto.Field( @@ -45677,90 +46858,13 @@ class GetIamPolicyInstantSnapshotGroupRequest(proto.Message): proto.STRING, number=195806222, ) - zone: str = proto.Field( - proto.STRING, - number=3744684, - ) -class GetIamPolicyInstantSnapshotRequest(proto.Message): - r"""A request message for InstantSnapshots.GetIamPolicy. See the +class GetIamPolicyLicenseCodeRequest(proto.Message): + r"""A request message for LicenseCodes.GetIamPolicy. See the method description for details. - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - options_requested_policy_version (int): - Requested IAM Policy version. - - This field is a member of `oneof`_ ``_options_requested_policy_version``. - project (str): - Project ID for this request. - resource (str): - Name or id of the resource for this request. - zone (str): - The name of the zone for this request. 
- """ - - options_requested_policy_version: int = proto.Field( - proto.INT32, - number=499220029, - optional=True, - ) - project: str = proto.Field( - proto.STRING, - number=227560217, - ) - resource: str = proto.Field( - proto.STRING, - number=195806222, - ) - zone: str = proto.Field( - proto.STRING, - number=3744684, - ) - - -class GetIamPolicyInterconnectAttachmentGroupRequest(proto.Message): - r"""A request message for - InterconnectAttachmentGroups.GetIamPolicy. See the method - description for details. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - options_requested_policy_version (int): - Requested IAM Policy version. - - This field is a member of `oneof`_ ``_options_requested_policy_version``. - project (str): - Project ID for this request. - resource (str): - Name or id of the resource for this request. - """ - - options_requested_policy_version: int = proto.Field( - proto.INT32, - number=499220029, - optional=True, - ) - project: str = proto.Field( - proto.STRING, - number=227560217, - ) - resource: str = proto.Field( - proto.STRING, - number=195806222, - ) - - -class GetIamPolicyInterconnectGroupRequest(proto.Message): - r"""A request message for InterconnectGroups.GetIamPolicy. See - the method description for details. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: @@ -50357,13 +51461,12 @@ class GetVersionOperationMetadataSbomInfo(proto.Message): Attributes: current_component_versions (MutableMapping[str, str]): - SBOM versions currently applied to the - resource. The key is the component name and the - value is the version. + A mapping of components to their + currently-applied versions or other appropriate + identifiers. target_component_versions (MutableMapping[str, str]): - SBOM versions scheduled for the next - maintenance. The key is the component name and - the value is the version. 
+ A mapping of components to their target + versions or other appropriate identifiers. """ current_component_versions: MutableMapping[str, str] = proto.MapField( @@ -56962,7 +58065,7 @@ class ImageParams(proto.Message): Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. The field is ignored (both PUT & PATCH) when empty. @@ -64574,7 +65677,7 @@ class InstanceGroupManagerAggregatedList(proto.Message): Attributes: id (str): - Output only. [Output Only] Unique identifier for the + Output only. Unique identifier for the resource; defined by the server. This field is a member of `oneof`_ ``_id``. @@ -64582,29 +65685,32 @@ class InstanceGroupManagerAggregatedList(proto.Message): A list of InstanceGroupManagersScopedList resources. kind (str): - Output only. [Output Only] The resource type, which is - alwayscompute#instanceGroupManagerAggregatedList for an - aggregated list of managed instance groups. + Output only. The resource type, which is + alwayscompute#instanceGroupManagerAggregatedList + for an aggregated list of managed instance + groups. This field is a member of `oneof`_ ``_kind``. next_page_token (str): - Output only. [Output Only] This token allows you to get the - next page of results for list requests. If the number of - results is larger thanmaxResults, use the nextPageToken as a - value for the query parameter pageToken in the next list - request. Subsequent list requests will have their own - nextPageToken to continue paging through the results. + Output only. This token allows you to get the + next page of results for list requests. 
If the + number of results is larger thanmaxResults, use + the nextPageToken as a value for the query + parameter pageToken in the next list request. + Subsequent list requests will have their own + nextPageToken to continue paging through the + results. This field is a member of `oneof`_ ``_next_page_token``. self_link (str): - Output only. [Output Only] Server-defined URL for this + Output only. Server-defined URL for this resource. This field is a member of `oneof`_ ``_self_link``. unreachables (MutableSequence[str]): - Output only. [Output Only] Unreachable resources. + Output only. Unreachable resources. warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] Informational warning message. + Output only. Informational warning message. This field is a member of `oneof`_ ``_warning``. """ @@ -65074,34 +66180,36 @@ class InstanceGroupManagerList(proto.Message): Attributes: id (str): - Output only. [Output Only] Unique identifier for the + Output only. Unique identifier for the resource; defined by the server. This field is a member of `oneof`_ ``_id``. items (MutableSequence[google.cloud.compute_v1beta.types.InstanceGroupManager]): A list of InstanceGroupManager resources. kind (str): - Output only. [Output Only] The resource type, which is - always compute#instanceGroupManagerList for a list of - managed instance groups. + Output only. The resource type, which is + always compute#instanceGroupManagerList for a + list of managed instance groups. This field is a member of `oneof`_ ``_kind``. next_page_token (str): - Output only. [Output Only] This token allows you to get the - next page of results for list requests. If the number of - results is larger thanmaxResults, use the nextPageToken as a - value for the query parameter pageToken in the next list - request. Subsequent list requests will have their own - nextPageToken to continue paging through the results. + Output only. 
This token allows you to get the + next page of results for list requests. If the + number of results is larger thanmaxResults, use + the nextPageToken as a value for the query + parameter pageToken in the next list request. + Subsequent list requests will have their own + nextPageToken to continue paging through the + results. This field is a member of `oneof`_ ``_next_page_token``. self_link (str): - Output only. [Output Only] Server-defined URL for this + Output only. Server-defined URL for this resource. This field is a member of `oneof`_ ``_self_link``. warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] Informational warning message. + Output only. Informational warning message. This field is a member of `oneof`_ ``_warning``. """ @@ -65457,34 +66565,36 @@ class InstanceGroupManagerResizeRequestsListResponse(proto.Message): Attributes: id (str): - Output only. [Output Only] Unique identifier for the + Output only. Unique identifier for the resource; defined by the server. This field is a member of `oneof`_ ``_id``. items (MutableSequence[google.cloud.compute_v1beta.types.InstanceGroupManagerResizeRequest]): A list of resize request resources. kind (str): - Output only. [Output Only] Type of the resource. - Alwayscompute#instanceGroupManagerResizeRequestList for a - list of resize requests. + Output only. Type of the resource. + Alwayscompute#instanceGroupManagerResizeRequestList + for a list of resize requests. This field is a member of `oneof`_ ``_kind``. next_page_token (str): - Output only. [Output Only] This token allows you to get the - next page of results for list requests. If the number of - results is larger thanmaxResults, use the nextPageToken as a - value for the query parameter pageToken in the next list - request. Subsequent list requests will have their own - nextPageToken to continue paging through the results. + Output only. This token allows you to get the + next page of results for list requests. 
If the + number of results is larger than maxResults, use + thenextPageToken as a value for the query + parameterpageToken in the next list request. + Subsequent list requests will have their own + nextPageToken to continue paging through the + results. This field is a member of `oneof`_ ``_next_page_token``. self_link (str): - Output only. [Output Only] Server-defined URL for this + Output only. Server-defined URL for this resource. This field is a member of `oneof`_ ``_self_link``. warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] Informational warning message. + Output only. Informational warning message. This field is a member of `oneof`_ ``_warning``. """ @@ -67402,11 +68512,13 @@ class InstanceGroupManagersScopedList(proto.Message): Attributes: instance_group_managers (MutableSequence[google.cloud.compute_v1beta.types.InstanceGroupManager]): - Output only. [Output Only] The list of managed instance - groups that are contained in the specified project and zone. + Output only. The list of managed instance + groups that are contained in the specified + project and zone. warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] The warning that replaces the - list of managed instance groups when the list is empty. + Output only. The warning that replaces the + list of managed instance groups when the list is + empty. This field is a member of `oneof`_ ``_warning``. """ @@ -68235,7 +69347,7 @@ class InstanceParams(proto.Message): instance. Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. The field is ignored (both PUT & PATCH) when empty. 
@@ -68380,10 +69492,12 @@ class InstanceProperties(proto.Message): resource_manager_tags (MutableMapping[str, str]): Input only. Resource manager tags to be bound to the instance. Tag keys and values have the same definition as - resource manager tags. Keys must be in the format - ``tagKeys/{tag_key_id}``, and values are in the format - ``tagValues/456``. The field is ignored (both PUT & PATCH) - when empty. + resource manager tags. Keys and values can be either in + numeric format, such as ``tagKeys/{tag_key_id}`` and + ``tagValues/{tag_value_id}`` or in namespaced format such as + ``{org_id|project_id}/{tag_key_short_name}`` and + ``{tag_value_short_name}``. The field is ignored (both PUT & + PATCH) when empty. resource_policies (MutableSequence[str]): Resource policies (names, not URLs) applied to instances created from these properties. @@ -75019,7 +76133,7 @@ class IntentMismatchBehavior(proto.Enum): class InterconnectGroupsCreateMembersInterconnectInput(proto.Message): - r"""LINT.IfChange + r""" .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -77289,6 +78403,47 @@ class InterconnectsGetMacsecConfigResponse(proto.Message): ) +class Interval(proto.Message): + r"""Represents a time interval, encoded as a Timestamp start + (inclusive) and a Timestamp end (exclusive). + + The start must be less than or equal to the end. + When the start equals the end, the interval is empty (matches no + time). When both start and end are unspecified, the interval + matches any time. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + end_time (str): + Optional. Exclusive end of the interval. + + If specified, a Timestamp matching this interval + will have to be before the end. + + This field is a member of `oneof`_ ``_end_time``. + start_time (str): + Optional. Inclusive start of the interval. 
+ + If specified, a Timestamp matching this interval + will have to be the same or after the start. + + This field is a member of `oneof`_ ``_start_time``. + """ + + end_time: str = proto.Field( + proto.STRING, + number=114938801, + optional=True, + ) + start_time: str = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + + class InvalidateCacheRegionUrlMapRequest(proto.Message): r"""A request message for RegionUrlMaps.InvalidateCache. See the method description for details. @@ -77684,6 +78839,15 @@ class LicenseCode(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + allowed_replacement_licenses (MutableSequence[str]): + Specifies licenseCodes of licenses that can replace this + license. Note: such replacements are allowed even if + removable_from_disk is false. + appendable_to_disk (bool): + If true, this license can be appended to an + existing disk's set of licenses. + + This field is a member of `oneof`_ ``_appendable_to_disk``. creation_timestamp (str): Output only. [Output Only] Creation timestamp inRFC3339 text format. @@ -77698,6 +78862,11 @@ class LicenseCode(proto.Message): resource. This identifier is defined by the server. This field is a member of `oneof`_ ``_id``. + incompatible_licenses (MutableSequence[str]): + Specifies licenseCodes of licenses that are + incompatible with this license. If a license is + incompatible with this license, it cannot be + attached to the same disk or image. kind (str): Output only. [Output Only] Type of resource. Always compute#licenseCode for licenses. @@ -77706,16 +78875,50 @@ class LicenseCode(proto.Message): license_alias (MutableSequence[google.cloud.compute_v1beta.types.LicenseCodeLicenseAlias]): [Output Only] URL and description aliases of Licenses with the same License Code. 
+ minimum_retention (google.cloud.compute_v1beta.types.Duration): + If set, this license will be unable to be removed or + replaced once attached to a disk until the minimum_retention + period has passed. + + This field is a member of `oneof`_ ``_minimum_retention``. + multi_tenant_only (bool): + If true, this license can only be used on VMs + on multi tenant nodes. + + This field is a member of `oneof`_ ``_multi_tenant_only``. name (str): Output only. [Output Only] Name of the resource. The name is 1-20 characters long and must be a valid 64 bit integer. This field is a member of `oneof`_ ``_name``. + os_license (bool): + If true, indicates this is an OS license. + Only one OS license can be attached to a disk or + image at a time. + + This field is a member of `oneof`_ ``_os_license``. + removable_from_disk (bool): + If true, this license can be removed from a + disk's set of licenses, with no replacement + license needed. + + This field is a member of `oneof`_ ``_removable_from_disk``. + required_coattached_licenses (MutableSequence[str]): + Specifies the set of permissible coattached + licenseCodes of licenses that satisfy the + coattachment requirement of this license. At + least one license from the set must be attached + to the same disk or image as this license. self_link (str): Output only. [Output Only] Server-defined URL for the resource. This field is a member of `oneof`_ ``_self_link``. + sole_tenant_only (bool): + If true, this license can only be used on VMs + on sole tenant nodes. + + This field is a member of `oneof`_ ``_sole_tenant_only``. state (str): Output only. [Output Only] Current state of this License Code. Check the State enum for the list of possible values. @@ -77727,6 +78930,11 @@ class LicenseCode(proto.Message): Otherwise, the license is not transferred. This field is a member of `oneof`_ ``_transferable``. + update_timestamp (str): + Output only. [Output Only] Last update timestamp inRFC3339 + text format. 
+ + This field is a member of `oneof`_ ``_update_timestamp``. """ class State(proto.Enum): @@ -77759,6 +78967,15 @@ class State(proto.Enum): STATE_UNSPECIFIED = 470755401 TERMINATED = 250018339 + allowed_replacement_licenses: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=195677718, + ) + appendable_to_disk: bool = proto.Field( + proto.BOOL, + number=16959254, + optional=True, + ) creation_timestamp: str = proto.Field( proto.STRING, number=30525366, @@ -77774,6 +78991,10 @@ class State(proto.Enum): number=3355, optional=True, ) + incompatible_licenses: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=334239768, + ) kind: str = proto.Field( proto.STRING, number=3292052, @@ -77784,16 +79005,46 @@ class State(proto.Enum): number=43550930, message="LicenseCodeLicenseAlias", ) + minimum_retention: "Duration" = proto.Field( + proto.MESSAGE, + number=155398189, + optional=True, + message="Duration", + ) + multi_tenant_only: bool = proto.Field( + proto.BOOL, + number=274395163, + optional=True, + ) name: str = proto.Field( proto.STRING, number=3373707, optional=True, ) + os_license: bool = proto.Field( + proto.BOOL, + number=487986406, + optional=True, + ) + removable_from_disk: bool = proto.Field( + proto.BOOL, + number=25854638, + optional=True, + ) + required_coattached_licenses: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=129195265, + ) self_link: str = proto.Field( proto.STRING, number=456214797, optional=True, ) + sole_tenant_only: bool = proto.Field( + proto.BOOL, + number=427525559, + optional=True, + ) state: str = proto.Field( proto.STRING, number=109757585, @@ -77804,6 +79055,11 @@ class State(proto.Enum): number=4349893, optional=True, ) + update_timestamp: str = proto.Field( + proto.STRING, + number=120894752, + optional=True, + ) class LicenseCodeLicenseAlias(proto.Message): @@ -77844,7 +79100,7 @@ class LicenseParams(proto.Message): license. 
Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. The field is ignored (both PUT & PATCH) when empty. @@ -103405,6 +104661,12 @@ class ManagedInstance(proto.Message): timestamp of the instance, if applicable. This field is a member of `oneof`_ ``_scheduling``. + shutdown_details (google.cloud.compute_v1beta.types.ManagedInstanceShutdownDetails): + Output only. [Output Only] Specifies the graceful shutdown + details if the instance is in ``PENDING_STOP`` state or + there is a programmed stop scheduled. + + This field is a member of `oneof`_ ``_shutdown_details``. target_status (str): Output only. [Output Only] The eventual status of the instance. The instance group manager will not be identified @@ -103679,6 +104941,12 @@ class TargetStatus(proto.Enum): optional=True, message="ManagedInstanceScheduling", ) + shutdown_details: "ManagedInstanceShutdownDetails" = proto.Field( + proto.MESSAGE, + number=15198553, + optional=True, + message="ManagedInstanceShutdownDetails", + ) target_status: str = proto.Field( proto.STRING, number=307799648, @@ -103903,6 +105171,12 @@ class ManagedInstanceScheduling(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + graceful_shutdown_timestamp (str): + Output only. [Output Only] The timestamp at which the + underlying instance will be triggered for graceful shutdown + if it is configured. This is in RFC3339 text format. + + This field is a member of `oneof`_ ``_graceful_shutdown_timestamp``. termination_timestamp (str): Output only. [Output Only] The timestamp at which the managed instance will be terminated. 
This is in RFC3339 text @@ -103911,6 +105185,11 @@ class ManagedInstanceScheduling(proto.Message): This field is a member of `oneof`_ ``_termination_timestamp``. """ + graceful_shutdown_timestamp: str = proto.Field( + proto.STRING, + number=403022375, + optional=True, + ) termination_timestamp: str = proto.Field( proto.STRING, number=364180891, @@ -103918,6 +105197,39 @@ class ManagedInstanceScheduling(proto.Message): ) +class ManagedInstanceShutdownDetails(proto.Message): + r""" + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + max_duration (google.cloud.compute_v1beta.types.Duration): + Output only. [Output Only] The duration for graceful + shutdown. Only applicable when the instance is in + ``PENDING_STOP`` state. + + This field is a member of `oneof`_ ``_max_duration``. + request_timestamp (str): + Output only. [Output Only] Past timestamp indicating the + beginning of ``PENDING_STOP`` state of instance in RFC3339 + text format. + + This field is a member of `oneof`_ ``_request_timestamp``. + """ + + max_duration: "Duration" = proto.Field( + proto.MESSAGE, + number=39954959, + optional=True, + message="Duration", + ) + request_timestamp: str = proto.Field( + proto.STRING, + number=521301862, + optional=True, + ) + + class ManagedInstanceVersion(proto.Message): r""" @@ -104132,6 +105444,51 @@ class MetadataFilterLabelMatch(proto.Message): ) +class Money(proto.Message): + r"""Represents an amount of money with its currency type. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + currency_code (str): + The three-letter currency code defined in ISO + 4217. + + This field is a member of `oneof`_ ``_currency_code``. + nanos (int): + Number of nano (10^-9) units of the amount. The value must + be between -999,999,999 and +999,999,999 inclusive. If + ``units`` is positive, ``nanos`` must be positive or zero. 
+ If ``units`` is zero, ``nanos`` can be positive, zero, or + negative. If ``units`` is negative, ``nanos`` must be + negative or zero. For example $-1.75 is represented as + ``units``\ =-1 and ``nanos``\ =-750,000,000. + + This field is a member of `oneof`_ ``_nanos``. + units (int): + The whole units of the amount. For example if + ``currencyCode`` is ``"USD"``, then 1 unit is one US dollar. + + This field is a member of `oneof`_ ``_units``. + """ + + currency_code: str = proto.Field( + proto.STRING, + number=34986331, + optional=True, + ) + nanos: int = proto.Field( + proto.INT32, + number=104586303, + optional=True, + ) + units: int = proto.Field( + proto.INT64, + number=111433583, + optional=True, + ) + + class MoveAddressRequest(proto.Message): r"""A request message for Addresses.Move. See the method description for details. @@ -107640,6 +108997,10 @@ class NetworkInterface(proto.Message): An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. + alias_ipv6_ranges (MutableSequence[google.cloud.compute_v1beta.types.AliasIpRange]): + An array of alias IPv6 ranges for this + network interface. You can only specify this + field for network interfaces in VPC networks. enable_vpc_scoped_dns (bool): Optional. If true, DNS resolution will be enabled over this interface. Only valid with network_attachment. @@ -107927,6 +109288,11 @@ class StackType(proto.Enum): number=165085631, message="AliasIpRange", ) + alias_ipv6_ranges: MutableSequence["AliasIpRange"] = proto.RepeatedField( + proto.MESSAGE, + number=104028351, + message="AliasIpRange", + ) enable_vpc_scoped_dns: bool = proto.Field( proto.BOOL, number=283425868, @@ -109818,6 +111184,16 @@ class AddressPurposes(proto.Enum): NAT_AUTO (163666477): External IP automatically reserved for Cloud NAT. 
+ PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 (119932186): + The global external address can only be + assigned to Global External Passthrough Network + Load Balancer forwarding rules, as an + Availability Group 0 address. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 (119932187): + The global external address can only be + assigned to Global External Passthrough Network + Load Balancer forwarding rules, as an + Availability Group 1 address. PRIVATE_SERVICE_CONNECT (48134724): No description available. SERVERLESS (270492508): @@ -109836,6 +111212,8 @@ class AddressPurposes(proto.Enum): GCE_ENDPOINT = 230515243 IPSEC_INTERCONNECT = 340437251 NAT_AUTO = 163666477 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 = 119932186 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 = 119932187 PRIVATE_SERVICE_CONNECT = 48134724 SERVERLESS = 270492508 SHARED_LOADBALANCER_VIP = 294447572 @@ -110420,6 +111798,8 @@ class SubnetworkPurposes(proto.Enum): UNDEFINED_SUBNETWORK_PURPOSES (0): A value indicating that the enum field is not set. + CUSTOM_HARDWARE_LINK (508097443): + Subnetwork used for Custom Hardware Link. GLOBAL_MANAGED_PROXY (236463602): Subnet reserved for Global Envoy-based Load Balancing. @@ -110446,6 +111826,7 @@ class SubnetworkPurposes(proto.Enum): """ UNDEFINED_SUBNETWORK_PURPOSES = 0 + CUSTOM_HARDWARE_LINK = 508097443 GLOBAL_MANAGED_PROXY = 236463602 INTERNAL_HTTPS_LOAD_BALANCER = 248748889 PEER_MIGRATION = 491902225 @@ -122362,6 +123743,38 @@ class PublicDelegatedPrefix(proto.Message): public_delegated_sub_prefixs (MutableSequence[google.cloud.compute_v1beta.types.PublicDelegatedPrefixPublicDelegatedSubPrefix]): The list of sub public delegated prefixes that exist for this public delegated prefix. + purpose (str): + Immutable. The purpose of the public delegated prefix. + + This field can only be set for the top-level global public + delegated prefix. 
It is an output-only field for the + sub-delegates that inherit the value from the top-level + global public delegated prefix. Once the value is set, it + cannot be changed. + + The field cannot be set for regional public delegated + prefixes. + + The supported values are: + + :: + + - APPLICATION_AND_PROXY_LOAD_BALANCERS: The global public + delegated prefix can only be used by Global External Application and + Proxy Load Balancers to allocate addresses for forwarding rules. This is + the default value. + - PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0: The + global public delegated prefix can only be used by Global External + Passthrough Network Load Balancers to allocate Availability Group 0 + addresses for forwarding rules. + - PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1: The + global public delegated prefix can only be used by Global External + Passthrough Network Load Balancers to allocate Availability Group 1 + addresses for forwarding rules. + + Check the Purpose enum for the list of possible values. + + This field is a member of `oneof`_ ``_purpose``. region (str): Output only. [Output Only] URL of the region where the public delegated prefix resides. This field applies only to @@ -122480,6 +123893,59 @@ class Mode(proto.Enum): EXTERNAL_IPV6_SUBNETWORK_CREATION = 61198284 INTERNAL_IPV6_SUBNETWORK_CREATION = 153239834 + class Purpose(proto.Enum): + r"""Immutable. The purpose of the public delegated prefix. + + This field can only be set for the top-level global public delegated + prefix. It is an output-only field for the sub-delegates that + inherit the value from the top-level global public delegated prefix. + Once the value is set, it cannot be changed. + + The field cannot be set for regional public delegated prefixes. + + The supported values are: + + :: + + - APPLICATION_AND_PROXY_LOAD_BALANCERS: The global public + delegated prefix can only be used by Global External Application and + Proxy Load Balancers to allocate addresses for forwarding rules. 
This is + the default value. + - PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0: The + global public delegated prefix can only be used by Global External + Passthrough Network Load Balancers to allocate Availability Group 0 + addresses for forwarding rules. + - PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1: The + global public delegated prefix can only be used by Global External + Passthrough Network Load Balancers to allocate Availability Group 1 + addresses for forwarding rules. + + Values: + UNDEFINED_PURPOSE (0): + A value indicating that the enum field is not + set. + APPLICATION_AND_PROXY_LOAD_BALANCERS (480371116): + The global public delegated prefix can only + be used by Global External Application and Proxy + Load Balancers to allocate addresses for + forwarding rules. This is the default value. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 (119932186): + The global public delegated prefix can only + be used by Global External Passthrough Network + Load Balancers to allocate Availability Group 0 + addresses for forwarding rules. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 (119932187): + The global public delegated prefix can only + be used by Global External Passthrough Network + Load Balancers to allocate Availability Group 1 + addresses for forwarding rules. 
+ """ + + UNDEFINED_PURPOSE = 0 + APPLICATION_AND_PROXY_LOAD_BALANCERS = 480371116 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 = 119932186 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 = 119932187 + class Status(proto.Enum): r"""[Output Only] The status of the public delegated prefix, which can be one of following values: @@ -122607,6 +124073,11 @@ class Status(proto.Enum): number=188940044, message="PublicDelegatedPrefixPublicDelegatedSubPrefix", ) + purpose: str = proto.Field( + proto.STRING, + number=316407070, + optional=True, + ) region: str = proto.Field( proto.STRING, number=138946292, @@ -122836,6 +124307,12 @@ class PublicDelegatedPrefixPublicDelegatedSubPrefix(proto.Message): The name of the sub public delegated prefix. This field is a member of `oneof`_ ``_name``. + purpose (str): + Output only. [Output Only] The purpose of the sub public + delegated prefix. Inherited from parent prefix. Check the + Purpose enum for the list of possible values. + + This field is a member of `oneof`_ ``_purpose``. region (str): Output only. [Output Only] The region of the sub public delegated prefix if it is regional. If absent, the sub @@ -122913,6 +124390,36 @@ class Mode(proto.Enum): EXTERNAL_IPV6_SUBNETWORK_CREATION = 61198284 INTERNAL_IPV6_SUBNETWORK_CREATION = 153239834 + class Purpose(proto.Enum): + r"""Output only. [Output Only] The purpose of the sub public delegated + prefix. Inherited from parent prefix. + + Values: + UNDEFINED_PURPOSE (0): + A value indicating that the enum field is not + set. + APPLICATION_AND_PROXY_LOAD_BALANCERS (480371116): + The global public delegated prefix can only + be used by Global External Application and Proxy + Load Balancers to allocate addresses for + forwarding rules. This is the default value. 
+ PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 (119932186): + The global public delegated prefix can only + be used by Global External Passthrough Network + Load Balancers to allocate Availability Group 0 + addresses for forwarding rules. + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 (119932187): + The global public delegated prefix can only + be used by Global External Passthrough Network + Load Balancers to allocate Availability Group 1 + addresses for forwarding rules. + """ + + UNDEFINED_PURPOSE = 0 + APPLICATION_AND_PROXY_LOAD_BALANCERS = 480371116 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP0 = 119932186 + PASSTHROUGH_LOAD_BALANCER_AVAILABILITY_GROUP1 = 119932187 + class Status(proto.Enum): r"""Output only. [Output Only] The status of the sub public delegated prefix. @@ -122976,6 +124483,11 @@ class Status(proto.Enum): number=3373707, optional=True, ) + purpose: str = proto.Field( + proto.STRING, + number=316407070, + optional=True, + ) region: str = proto.Field( proto.STRING, number=138946292, @@ -123932,6 +125444,12 @@ class RecreateInstancesInstanceGroupManagerRequest(proto.Message): The name of the managed instance group. instance_group_managers_recreate_instances_request_resource (google.cloud.compute_v1beta.types.InstanceGroupManagersRecreateInstancesRequest): The body resource for this request + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance recreation even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. 
request_id (str): @@ -123970,6 +125488,11 @@ class RecreateInstancesInstanceGroupManagerRequest(proto.Message): number=21405952, message="InstanceGroupManagersRecreateInstancesRequest", ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -123996,6 +125519,12 @@ class RecreateInstancesRegionInstanceGroupManagerRequest(proto.Message): Attributes: instance_group_manager (str): Name of the managed instance group. + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance recreation even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. region (str): @@ -124030,6 +125559,11 @@ class RecreateInstancesRegionInstanceGroupManagerRequest(proto.Message): proto.STRING, number=249363395, ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -124671,34 +126205,37 @@ class RegionInstanceGroupManagerList(proto.Message): Attributes: id (str): - Output only. [Output Only] Unique identifier for the + Output only. Unique identifier for the resource; defined by the server. This field is a member of `oneof`_ ``_id``. items (MutableSequence[google.cloud.compute_v1beta.types.InstanceGroupManager]): A list of InstanceGroupManager resources. kind (str): - Output only. [Output Only] The resource type, which is - always compute#instanceGroupManagerList for a list of - managed instance groups that exist in th regional scope. + Output only. The resource type, which is + always compute#instanceGroupManagerList for a + list of managed instance groups that exist in th + regional scope. This field is a member of `oneof`_ ``_kind``. next_page_token (str): - Output only. 
[Output Only] This token allows you to get the - next page of results for list requests. If the number of - results is larger thanmaxResults, use the nextPageToken as a - value for the query parameter pageToken in the next list - request. Subsequent list requests will have their own - nextPageToken to continue paging through the results. + Output only. This token allows you to get the + next page of results for list requests. If the + number of results is larger thanmaxResults, use + the nextPageToken as a value for the query + parameter pageToken in the next list request. + Subsequent list requests will have their own + nextPageToken to continue paging through the + results. This field is a member of `oneof`_ ``_next_page_token``. self_link (str): - Output only. [Output Only] Server-defined URL for this + Output only. Server-defined URL for this resource. This field is a member of `oneof`_ ``_self_link``. warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] Informational warning message. + Output only. Informational warning message. This field is a member of `oneof`_ ``_warning``. """ @@ -124766,37 +126303,39 @@ class RegionInstanceGroupManagerResizeRequestsListResponse(proto.Message): This field is a member of `oneof`_ ``_etag``. id (str): - Output only. [Output Only] Unique identifier for the + Output only. Unique identifier for the resource; defined by the server. This field is a member of `oneof`_ ``_id``. items (MutableSequence[google.cloud.compute_v1beta.types.InstanceGroupManagerResizeRequest]): A list of Resize Request resources. kind (str): - Output only. [Output Only] Type of the resource. + Output only. Type of the resource. Alwayscompute#regionInstanceGroupManagerResizeRequestList for a list of Resize Requests. This field is a member of `oneof`_ ``_kind``. next_page_token (str): - Output only. [Output Only] This token allows you to get the - next page of results for list requests. 
If the number of - results is larger thanmaxResults, use the nextPageToken as a - value for the query parameter pageToken in the next list - request. Subsequent list requests will have their own - nextPageToken to continue paging through the results. + Output only. This token allows you to get the + next page of results for list requests. If the + number of results is larger thanmaxResults, use + the nextPageToken as a value for the query + parameter pageToken in the next list request. + Subsequent list requests will have their own + nextPageToken to continue paging through the + results. This field is a member of `oneof`_ ``_next_page_token``. self_link (str): - Output only. [Output Only] Server-defined URL for this + Output only. Server-defined URL for this resource. This field is a member of `oneof`_ ``_self_link``. unreachables (MutableSequence[str]): - Output only. [Output Only] Unreachable resources. - end_interface: MixerListResponseWithEtagBuilder + Output only. Unreachable resources. end_interface: + MixerListResponseWithEtagBuilder warning (google.cloud.compute_v1beta.types.Warning): - Output only. [Output Only] Informational warning message. + Output only. Informational warning message. This field is a member of `oneof`_ ``_warning``. """ @@ -133312,6 +134851,12 @@ class RolloutWaveDetailsOrchestratedWaveDetails(proto.Message): Output only. Resource completed so far. This field is a member of `oneof`_ ``_completed_resources_count``. + estimated_completion_time (str): + Output only. Estimated timestamp at which the + wave will complete. Extrapolated from current + progress. + + This field is a member of `oneof`_ ``_estimated_completion_time``. estimated_total_resources_count (int): Output only. Estimated total count of resources. 
@@ -133338,6 +134883,11 @@ class RolloutWaveDetailsOrchestratedWaveDetails(proto.Message): number=208328833, optional=True, ) + estimated_completion_time: str = proto.Field( + proto.STRING, + number=102305613, + optional=True, + ) estimated_total_resources_count: int = proto.Field( proto.INT64, number=457594807, @@ -143260,6 +144810,34 @@ class SetIamPolicyInterconnectGroupRequest(proto.Message): ) +class SetIamPolicyLicenseCodeRequest(proto.Message): + r"""A request message for LicenseCodes.SetIamPolicy. See the + method description for details. + + Attributes: + global_set_policy_request_resource (google.cloud.compute_v1beta.types.GlobalSetPolicyRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_policy_request_resource: "GlobalSetPolicyRequest" = proto.Field( + proto.MESSAGE, + number=337048498, + message="GlobalSetPolicyRequest", + ) + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + resource: str = proto.Field( + proto.STRING, + number=195806222, + ) + + class SetIamPolicyLicenseRequest(proto.Message): r"""A request message for Licenses.SetIamPolicy. See the method description for details. @@ -149075,7 +150653,7 @@ class SnapshotParams(proto.Message): snapshot. Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. The field is ignored (both PUT & PATCH) when empty. @@ -150425,6 +152003,23 @@ class SslPolicy(proto.Message): except the last character, which cannot be a dash. This field is a member of `oneof`_ ``_name``. + post_quantum_key_exchange (str): + One of DEFAULT, ENABLED, orDEFERRED. 
Controls + whether the load balancer negotiates + X25519MLKEM768 key exchange when clients + advertise support for it. When set to DEFAULT, + or if no SSL Policy is attached to the target + proxy, the load balancer disallows + X25519MLKEM768 key exchange before October 2026, + and allows it afterward. When set to ENABLED, + the load balancer allows X25519MLKEM768 key + exchange. When set toDEFERRED, the load balancer + disallows X25519MLKEM768 key exchange until + October 2027, and allows it afterward. Check the + PostQuantumKeyExchange enum for the list of + possible values. + + This field is a member of `oneof`_ ``_post_quantum_key_exchange``. profile (str): Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This @@ -150479,6 +152074,36 @@ class MinTlsVersion(proto.Enum): TLS_1_2 = 33116736 TLS_1_3 = 33116737 + class PostQuantumKeyExchange(proto.Enum): + r"""One of DEFAULT, ENABLED, orDEFERRED. Controls whether the + load balancer negotiates X25519MLKEM768 key exchange when + clients advertise support for it. When set to DEFAULT, or if no + SSL Policy is attached to the target proxy, the load balancer + disallows X25519MLKEM768 key exchange before October 2026, and + allows it afterward. When set to ENABLED, the load balancer + allows X25519MLKEM768 key exchange. When set toDEFERRED, the + load balancer disallows X25519MLKEM768 key exchange until + October 2027, and allows it afterward. + + Values: + UNDEFINED_POST_QUANTUM_KEY_EXCHANGE (0): + A value indicating that the enum field is not + set. + DEFAULT (115302945): + Default behavior: disabled until October + 2026, enabled afterward. + DEFERRED (356775903): + Disabled until October 2027, enabled + afterward. + ENABLED (182130465): + Enabled now. 
+ """ + + UNDEFINED_POST_QUANTUM_KEY_EXCHANGE = 0 + DEFAULT = 115302945 + DEFERRED = 356775903 + ENABLED = 182130465 + class Profile(proto.Enum): r"""Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one @@ -150564,6 +152189,11 @@ class Profile(proto.Enum): number=3373707, optional=True, ) + post_quantum_key_exchange: str = proto.Field( + proto.STRING, + number=245546214, + optional=True, + ) profile: str = proto.Field( proto.STRING, number=227445161, @@ -151522,6 +153152,12 @@ class StopInstancesInstanceGroupManagerRequest(proto.Message): The name of the managed instance group. instance_group_managers_stop_instances_request_resource (google.cloud.compute_v1beta.types.InstanceGroupManagersStopInstancesRequest): The body resource for this request + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance stopping even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. request_id (str): @@ -151560,6 +153196,11 @@ class StopInstancesInstanceGroupManagerRequest(proto.Message): number=37556877, message="InstanceGroupManagersStopInstancesRequest", ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -151586,6 +153227,12 @@ class StopInstancesRegionInstanceGroupManagerRequest(proto.Message): Attributes: instance_group_manager (str): The name of the managed instance group. + no_graceful_shutdown (bool): + When set, graceful shutdown is skipped for + instance stopping even if it's configured for + the instances. + + This field is a member of `oneof`_ ``_no_graceful_shutdown``. project (str): Project ID for this request. 
region (str): @@ -151620,6 +153267,11 @@ class StopInstancesRegionInstanceGroupManagerRequest(proto.Message): proto.STRING, number=249363395, ) + no_graceful_shutdown: bool = proto.Field( + proto.BOOL, + number=336255890, + optional=True, + ) project: str = proto.Field( proto.STRING, number=227560217, @@ -152425,7 +154077,7 @@ class StoragePoolParams(proto.Message): pool. Tag keys and values have the same definition as resource manager tags. Keys and values can be either in numeric format, such as ``tagKeys/{tag_key_id}`` and - ``tagValues/456`` or in namespaced format such as + ``tagValues/{tag_value_id}`` or in namespaced format such as ``{org_id|project_id}/{tag_key_short_name}`` and ``{tag_value_short_name}``. The field is ignored (both PUT & PATCH) when empty. @@ -153370,6 +155022,8 @@ class Purpose(proto.Enum): UNDEFINED_PURPOSE (0): A value indicating that the enum field is not set. + CUSTOM_HARDWARE_LINK (508097443): + Subnetwork used for Custom Hardware Link. GLOBAL_MANAGED_PROXY (236463602): Subnet reserved for Global Envoy-based Load Balancing. @@ -153399,6 +155053,7 @@ class Purpose(proto.Enum): """ UNDEFINED_PURPOSE = 0 + CUSTOM_HARDWARE_LINK = 508097443 GLOBAL_MANAGED_PROXY = 236463602 INTERNAL_HTTPS_LOAD_BALANCER = 248748889 PEER_MIGRATION = 491902225 @@ -154031,6 +155686,21 @@ class SubnetworkSecondaryRange(proto.Message): the ``ipCollection`` field. This field is a member of `oneof`_ ``_ip_cidr_range``. + ip_collection (str): + Reference to a Public Delegated Prefix (PDP) for BYOIP. This + field should be specified for configuring BYOGUA internal + IPv6 secondary range. When specified along with the + ip_cidr_range, the ip_cidr_range must lie within the PDP + referenced by the ``ipCollection`` field. When specified + without the ip_cidr_range, the range is auto-allocated from + the PDP referenced by the ``ipCollection`` field. + + This field is a member of `oneof`_ ``_ip_collection``. 
+ ip_version (str): + Check the IpVersion enum for the list of + possible values. + + This field is a member of `oneof`_ ``_ip_version``. range_name (str): The name associated with this subnetwork secondary range, used when adding an alias @@ -154046,11 +155716,41 @@ class SubnetworkSecondaryRange(proto.Message): This field is a member of `oneof`_ ``_reserved_internal_range``. """ + class IpVersion(proto.Enum): + r""" + + Values: + UNDEFINED_IP_VERSION (0): + A value indicating that the enum field is not + set. + IPV4 (2254341): + No description available. + IPV6 (2254343): + No description available. + IP_VERSION_UNSPECIFIED (92360440): + Treated as IPV4 for backward-compatibility. + """ + + UNDEFINED_IP_VERSION = 0 + IPV4 = 2254341 + IPV6 = 2254343 + IP_VERSION_UNSPECIFIED = 92360440 + ip_cidr_range: str = proto.Field( proto.STRING, number=98117322, optional=True, ) + ip_collection: str = proto.Field( + proto.STRING, + number=176818358, + optional=True, + ) + ip_version: str = proto.Field( + proto.STRING, + number=294959552, + optional=True, + ) range_name: str = proto.Field( proto.STRING, number=332216397, @@ -164996,6 +166696,8 @@ class Purpose(proto.Enum): UNDEFINED_PURPOSE (0): A value indicating that the enum field is not set. + CUSTOM_HARDWARE_LINK (508097443): + Subnetwork used for Custom Hardware Link. GLOBAL_MANAGED_PROXY (236463602): Subnet reserved for Global Envoy-based Load Balancing. 
@@ -165025,6 +166727,7 @@ class Purpose(proto.Enum): """ UNDEFINED_PURPOSE = 0 + CUSTOM_HARDWARE_LINK = 508097443 GLOBAL_MANAGED_PROXY = 236463602 INTERNAL_HTTPS_LOAD_BALANCER = 248748889 PEER_MIGRATION = 491902225 diff --git a/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_history_sync.py b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_history_sync.py new file mode 100644 index 000000000000..785303ef1a70 --- /dev/null +++ b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_history_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CapacityHistory +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute-v1beta + + +# [START compute_v1beta_generated_Advice_CapacityHistory_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1beta + + +def sample_capacity_history(): + # Create a client + client = compute_v1beta.AdviceClient() + + # Initialize request argument(s) + request = compute_v1beta.CapacityHistoryAdviceRequest( + project="project_value", + region="region_value", + ) + + # Make the request + response = client.capacity_history(request=request) + + # Handle the response + print(response) + + +# [END compute_v1beta_generated_Advice_CapacityHistory_sync] diff --git a/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_sync.py b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_sync.py new file mode 100644 index 000000000000..531b2255ba16 --- /dev/null +++ b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_advice_capacity_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Capacity +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute-v1beta + + +# [START compute_v1beta_generated_Advice_Capacity_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1beta + + +def sample_capacity(): + # Create a client + client = compute_v1beta.AdviceClient() + + # Initialize request argument(s) + request = compute_v1beta.CapacityAdviceRpcRequest( + project="project_value", + region="region_value", + ) + + # Make the request + response = client.capacity(request=request) + + # Handle the response + print(response) + + +# [END compute_v1beta_generated_Advice_Capacity_sync] diff --git a/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_get_iam_policy_sync.py b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_get_iam_policy_sync.py new file mode 100644 index 000000000000..3ee8cd54d933 --- /dev/null +++ b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_get_iam_policy_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute-v1beta + + +# [START compute_v1beta_generated_LicenseCodes_GetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1beta + + +def sample_get_iam_policy(): + # Create a client + client = compute_v1beta.LicenseCodesClient() + + # Initialize request argument(s) + request = compute_v1beta.GetIamPolicyLicenseCodeRequest( + project="project_value", + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + + +# [END compute_v1beta_generated_LicenseCodes_GetIamPolicy_sync] diff --git a/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_set_iam_policy_sync.py b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_set_iam_policy_sync.py new file mode 100644 index 000000000000..2989c8e91a45 --- /dev/null +++ b/packages/google-cloud-compute-v1beta/samples/generated_samples/compute_v1beta_generated_license_codes_set_iam_policy_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute-v1beta + + +# [START compute_v1beta_generated_LicenseCodes_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1beta + + +def sample_set_iam_policy(): + # Create a client + client = compute_v1beta.LicenseCodesClient() + + # Initialize request argument(s) + request = compute_v1beta.SetIamPolicyLicenseCodeRequest( + project="project_value", + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + + +# [END compute_v1beta_generated_LicenseCodes_SetIamPolicy_sync] diff --git a/packages/google-cloud-compute-v1beta/samples/generated_samples/snippet_metadata_google.cloud.compute.v1beta.json b/packages/google-cloud-compute-v1beta/samples/generated_samples/snippet_metadata_google.cloud.compute.v1beta.json index b633823c8415..01b17e08c341 100644 --- a/packages/google-cloud-compute-v1beta/samples/generated_samples/snippet_metadata_google.cloud.compute.v1beta.json +++ b/packages/google-cloud-compute-v1beta/samples/generated_samples/snippet_metadata_google.cloud.compute.v1beta.json @@ -1055,6 +1055,182 @@ ], "title": "compute_v1beta_generated_advice_calendar_mode_sync.py" }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1beta.AdviceClient", + "shortName": "AdviceClient" + }, + "fullName": "google.cloud.compute_v1beta.AdviceClient.capacity_history", + "method": { + "fullName": 
"google.cloud.compute.v1beta.Advice.CapacityHistory", + "service": { + "fullName": "google.cloud.compute.v1beta.Advice", + "shortName": "Advice" + }, + "shortName": "CapacityHistory" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.compute_v1beta.types.CapacityHistoryAdviceRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": "region", + "type": "str" + }, + { + "name": "capacity_history_request_resource", + "type": "google.cloud.compute_v1beta.types.CapacityHistoryRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.compute_v1beta.types.CapacityHistoryResponse", + "shortName": "capacity_history" + }, + "description": "Sample for CapacityHistory", + "file": "compute_v1beta_generated_advice_capacity_history_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1beta_generated_Advice_CapacityHistory_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1beta_generated_advice_capacity_history_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1beta.AdviceClient", + "shortName": "AdviceClient" + }, + "fullName": "google.cloud.compute_v1beta.AdviceClient.capacity", + "method": { + "fullName": "google.cloud.compute.v1beta.Advice.Capacity", + "service": { + "fullName": "google.cloud.compute.v1beta.Advice", + "shortName": "Advice" + }, + "shortName": "Capacity" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.compute_v1beta.types.CapacityAdviceRpcRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": "region", + "type": "str" + }, + { + "name": "capacity_advice_request_resource", + "type": "google.cloud.compute_v1beta.types.CapacityAdviceRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.compute_v1beta.types.CapacityAdviceResponse", + "shortName": "capacity" + }, + "description": "Sample for Capacity", + "file": "compute_v1beta_generated_advice_capacity_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1beta_generated_Advice_Capacity_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1beta_generated_advice_capacity_sync.py" + }, { "canonical": true, "clientMethod": { @@ -31267,6 +31443,90 @@ ], "title": "compute_v1beta_generated_interconnects_test_iam_permissions_sync.py" }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1beta.LicenseCodesClient", + "shortName": "LicenseCodesClient" + }, + "fullName": "google.cloud.compute_v1beta.LicenseCodesClient.get_iam_policy", + "method": { + "fullName": "google.cloud.compute.v1beta.LicenseCodes.GetIamPolicy", + "service": { + "fullName": "google.cloud.compute.v1beta.LicenseCodes", + "shortName": "LicenseCodes" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.compute_v1beta.types.GetIamPolicyLicenseCodeRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.compute_v1beta.types.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "compute_v1beta_generated_license_codes_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1beta_generated_LicenseCodes_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1beta_generated_license_codes_get_iam_policy_sync.py" + }, { "canonical": true, "clientMethod": { @@ -31351,6 +31611,94 @@ ], "title": "compute_v1beta_generated_license_codes_get_sync.py" }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1beta.LicenseCodesClient", + "shortName": "LicenseCodesClient" + }, + "fullName": "google.cloud.compute_v1beta.LicenseCodesClient.set_iam_policy", + "method": { + "fullName": "google.cloud.compute.v1beta.LicenseCodes.SetIamPolicy", + "service": { + "fullName": "google.cloud.compute.v1beta.LicenseCodes", + "shortName": "LicenseCodes" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.compute_v1beta.types.SetIamPolicyLicenseCodeRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": 
"resource", + "type": "str" + }, + { + "name": "global_set_policy_request_resource", + "type": "google.cloud.compute_v1beta.types.GlobalSetPolicyRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.compute_v1beta.types.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "compute_v1beta_generated_license_codes_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1beta_generated_LicenseCodes_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1beta_generated_license_codes_set_iam_policy_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_advice.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_advice.py index 6b5d28de3c8c..02e2d405031d 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_advice.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_advice.py @@ -1301,90 +1301,910 @@ def test_calendar_mode_rest_flattened_error(transport: str = "rest"): ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
+def test_capacity_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.capacity in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.capacity] = mock_rpc + + request = {} + client.capacity(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.capacity(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_capacity_rest_required_fields(request_type=compute.CapacityAdviceRpcRequest): + transport_class = transports.AdviceRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).capacity._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + + unset_fields = transport_class( + 
credentials=ga_credentials.AnonymousCredentials() + ).capacity._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.CapacityAdviceResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.CapacityAdviceResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.capacity(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_capacity_rest_unset_required_fields(): transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.capacity._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "capacityAdviceRequestResource", + "project", + "region", + ) + ) + ) + + +def test_capacity_rest_flattened(): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.CapacityAdviceResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + capacity_advice_request_resource=compute.CapacityAdviceRequest( + distribution_policy=compute.CapacityAdviceRequestDistributionPolicy( + target_shape="target_shape_value" + ) + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = compute.CapacityAdviceResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.capacity(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/beta/projects/{project}/regions/{region}/advice/capacity" + % client.transport._host, + args[1], + ) + + +def test_capacity_rest_flattened_error(transport: str = "rest"): + client = AdviceClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
with pytest.raises(ValueError): + client.capacity( + compute.CapacityAdviceRpcRequest(), + project="project_value", + region="region_value", + capacity_advice_request_resource=compute.CapacityAdviceRequest( + distribution_policy=compute.CapacityAdviceRequestDistributionPolicy( + target_shape="target_shape_value" + ) + ), + ) + + +def test_capacity_history_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = AdviceClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.capacity_history in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.capacity_history] = ( + mock_rpc ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.AdviceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + request = {} + client.capacity_history(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.capacity_history(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_capacity_history_rest_required_fields( + request_type=compute.CapacityHistoryAdviceRequest, +): + transport_class = transports.AdviceRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).capacity_history._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).capacity_history._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.CapacityHistoryResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.CapacityHistoryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.capacity_history(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_capacity_history_rest_unset_required_fields(): + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.capacity_history._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "capacityHistoryRequestResource", + "project", + "region", + ) + ) + ) + + +def test_capacity_history_rest_flattened(): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.CapacityHistoryResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + capacity_history_request_resource=compute.CapacityHistoryRequest( + instance_properties=compute.CapacityHistoryRequestInstanceProperties( + machine_type="machine_type_value" + ) + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = compute.CapacityHistoryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.capacity_history(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/beta/projects/{project}/regions/{region}/advice/capacityHistory" + % client.transport._host, + args[1], + ) + + +def test_capacity_history_rest_flattened_error(transport: str = "rest"): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.capacity_history( + compute.CapacityHistoryAdviceRequest(), + project="project_value", + region="region_value", + capacity_history_request_resource=compute.CapacityHistoryRequest( + instance_properties=compute.CapacityHistoryRequestInstanceProperties( + machine_type="machine_type_value" + ) + ), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AdviceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AdviceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AdviceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AdviceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AdviceClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AdviceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_rest(): + transport = AdviceClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_calendar_mode_rest_bad_request( + request_type=compute.CalendarModeAdviceRpcRequest, +): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.calendar_mode(request) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.CalendarModeAdviceRpcRequest, + dict, + ], +) +def test_calendar_mode_rest_call_success(request_type): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["calendar_mode_advice_request_resource"] = { + "future_resources_specs": {} + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = compute.CalendarModeAdviceRpcRequest.meta.fields[ + "calendar_mode_advice_request_resource" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "calendar_mode_advice_request_resource" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["calendar_mode_advice_request_resource"][field]) + ): + del 
request_init["calendar_mode_advice_request_resource"][field][i][ + subfield + ] + else: + del request_init["calendar_mode_advice_request_resource"][field][ + subfield + ] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.CalendarModeAdviceResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.CalendarModeAdviceResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.calendar_mode(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.CalendarModeAdviceResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_calendar_mode_rest_interceptors(null_interceptor): + transport = transports.AdviceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AdviceRestInterceptor(), + ) + client = AdviceClient(transport=transport) + + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object( + transports.AdviceRestInterceptor, "post_calendar_mode" + ) as post, + mock.patch.object( + transports.AdviceRestInterceptor, "post_calendar_mode_with_metadata" + ) as post_with_metadata, + mock.patch.object(transports.AdviceRestInterceptor, "pre_calendar_mode") as pre, + ): + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = compute.CalendarModeAdviceRpcRequest.pb( + compute.CalendarModeAdviceRpcRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = compute.CalendarModeAdviceResponse.to_json( + compute.CalendarModeAdviceResponse() + ) + req.return_value.content = return_value + + request = compute.CalendarModeAdviceRpcRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.CalendarModeAdviceResponse() + post_with_metadata.return_value = compute.CalendarModeAdviceResponse(), metadata + + client.calendar_mode( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def 
test_capacity_rest_bad_request(request_type=compute.CapacityAdviceRpcRequest): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.capacity(request) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.CapacityAdviceRpcRequest, + dict, + ], +) +def test_capacity_rest_call_success(request_type): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - with pytest.raises(ValueError): - client = AdviceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - # It is an error to provide an api_key and a transport instance. 
- transport = transports.AdviceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AdviceClient( - client_options=options, - transport=transport, - ) + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["capacity_advice_request_resource"] = { + "distribution_policy": { + "target_shape": "target_shape_value", + "zones": [{"zone": "zone_value"}], + }, + "instance_flexibility_policy": {"instance_selections": {}}, + "instance_properties": { + "scheduling": {"provisioning_model": "provisioning_model_value"} + }, + "size": 443, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AdviceClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # Determine if the message type is proto-plus or protobuf + test_field = compute.CapacityAdviceRpcRequest.meta.fields[ + "capacity_advice_request_resource" + ] - # It is an error to provide scopes and a transport instance. - transport = transports.AdviceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AdviceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "capacity_advice_request_resource" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in 
range( + 0, len(request_init["capacity_advice_request_resource"][field]) + ): + del request_init["capacity_advice_request_resource"][field][i][ + subfield + ] + else: + del request_init["capacity_advice_request_resource"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.CapacityAdviceResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.CapacityAdviceResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.capacity(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.CapacityAdviceResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_capacity_rest_interceptors(null_interceptor): transport = transports.AdviceRestTransport( credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AdviceRestInterceptor(), ) client = AdviceClient(transport=transport) - assert client.transport is transport + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object(transports.AdviceRestInterceptor, "post_capacity") as post, + mock.patch.object( + transports.AdviceRestInterceptor, "post_capacity_with_metadata" + ) as post_with_metadata, + mock.patch.object(transports.AdviceRestInterceptor, "pre_capacity") as pre, + ): + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = compute.CapacityAdviceRpcRequest.pb( + compute.CapacityAdviceRpcRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } -@pytest.mark.parametrize( - "transport_class", - [ - transports.AdviceRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = compute.CapacityAdviceResponse.to_json( + compute.CapacityAdviceResponse() + ) + req.return_value.content = return_value + request = compute.CapacityAdviceRpcRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.CapacityAdviceResponse() + post_with_metadata.return_value = compute.CapacityAdviceResponse(), metadata -def test_transport_kind_rest(): - transport = AdviceClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" + client.capacity( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_calendar_mode_rest_bad_request( - request_type=compute.CalendarModeAdviceRpcRequest, + +def test_capacity_history_rest_bad_request( + request_type=compute.CapacityHistoryAdviceRequest, ): client = AdviceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -1406,33 +2226,38 @@ def test_calendar_mode_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.calendar_mode(request) + client.capacity_history(request) @pytest.mark.parametrize( "request_type", [ - compute.CalendarModeAdviceRpcRequest, + compute.CapacityHistoryAdviceRequest, dict, ], ) -def test_calendar_mode_rest_call_success(request_type): +def test_capacity_history_rest_call_success(request_type): client = AdviceClient( 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"project": "sample1", "region": "sample2"} - request_init["calendar_mode_advice_request_resource"] = { - "future_resources_specs": {} + request_init["capacity_history_request_resource"] = { + "instance_properties": { + "machine_type": "machine_type_value", + "scheduling": {"provisioning_model": "provisioning_model_value"}, + }, + "location_policy": {"location": "location_value"}, + "types": ["types_value1", "types_value2"], } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = compute.CalendarModeAdviceRpcRequest.meta.fields[ - "calendar_mode_advice_request_resource" + test_field = compute.CapacityHistoryAdviceRequest.meta.fields[ + "capacity_history_request_resource" ] def get_message_fields(field): @@ -1462,7 +2287,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime for field, value in request_init[ - "calendar_mode_advice_request_resource" + "capacity_history_request_resource" ].items(): # pragma: NO COVER result = None is_repeated = False @@ -1494,40 +2319,43 @@ def get_message_fields(field): if subfield: if field_repeated: for i in range( - 0, len(request_init["calendar_mode_advice_request_resource"][field]) + 0, len(request_init["capacity_history_request_resource"][field]) ): - del request_init["calendar_mode_advice_request_resource"][field][i][ + del request_init["capacity_history_request_resource"][field][i][ subfield ] else: - del 
request_init["calendar_mode_advice_request_resource"][field][ - subfield - ] + del request_init["capacity_history_request_resource"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = compute.CalendarModeAdviceResponse() + return_value = compute.CapacityHistoryResponse( + location="location_value", + machine_type="machine_type_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 # Convert return value to protobuf type - return_value = compute.CalendarModeAdviceResponse.pb(return_value) + return_value = compute.CapacityHistoryResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.calendar_mode(request) + response = client.capacity_history(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, compute.CalendarModeAdviceResponse) + assert isinstance(response, compute.CapacityHistoryResponse) + assert response.location == "location_value" + assert response.machine_type == "machine_type_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_calendar_mode_rest_interceptors(null_interceptor): +def test_capacity_history_rest_interceptors(null_interceptor): transport = transports.AdviceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.AdviceRestInterceptor(), @@ -1538,18 +2366,20 @@ def test_calendar_mode_rest_interceptors(null_interceptor): mock.patch.object(type(client.transport._session), "request") as req, mock.patch.object(path_template, "transcode") as transcode, mock.patch.object( - transports.AdviceRestInterceptor, "post_calendar_mode" + transports.AdviceRestInterceptor, "post_capacity_history" ) as post, mock.patch.object( - transports.AdviceRestInterceptor, "post_calendar_mode_with_metadata" + transports.AdviceRestInterceptor, "post_capacity_history_with_metadata" ) as post_with_metadata, - mock.patch.object(transports.AdviceRestInterceptor, "pre_calendar_mode") as pre, + mock.patch.object( + transports.AdviceRestInterceptor, "pre_capacity_history" + ) as pre, ): pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = compute.CalendarModeAdviceRpcRequest.pb( - compute.CalendarModeAdviceRpcRequest() + pb_message = compute.CapacityHistoryAdviceRequest.pb( + compute.CapacityHistoryAdviceRequest() ) transcode.return_value = { "method": "post", @@ -1561,21 +2391,21 @@ def test_calendar_mode_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = compute.CalendarModeAdviceResponse.to_json( - compute.CalendarModeAdviceResponse() + return_value = 
compute.CapacityHistoryResponse.to_json( + compute.CapacityHistoryResponse() ) req.return_value.content = return_value - request = compute.CalendarModeAdviceRpcRequest() + request = compute.CapacityHistoryAdviceRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = compute.CalendarModeAdviceResponse() - post_with_metadata.return_value = compute.CalendarModeAdviceResponse(), metadata + post.return_value = compute.CapacityHistoryResponse() + post_with_metadata.return_value = compute.CapacityHistoryResponse(), metadata - client.calendar_mode( + client.capacity_history( request, metadata=[ ("key", "val"), @@ -1615,6 +2445,46 @@ def test_calendar_mode_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_capacity_empty_call_rest(): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.capacity), "__call__") as call: + client.capacity(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = compute.CapacityAdviceRpcRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_capacity_history_empty_call_rest(): + client = AdviceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.capacity_history), "__call__") as call: + client.capacity_history(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = compute.CapacityHistoryAdviceRequest() + + assert args[0] == request_msg + + def test_advice_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): @@ -1636,7 +2506,11 @@ def test_advice_base_transport(): # Every method on the transport should just blindly # raise NotImplementedError. - methods = ("calendar_mode",) + methods = ( + "calendar_mode", + "capacity", + "capacity_history", + ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) @@ -1782,6 +2656,12 @@ def test_advice_client_transport_session_collision(transport_name): session1 = client1.transport.calendar_mode._session session2 = client2.transport.calendar_mode._session assert session1 != session2 + session1 = client1.transport.capacity._session + session2 = client2.transport.capacity._session + assert session1 != session2 + session1 = client1.transport.capacity_history._session + session2 = client2.transport.capacity_history._session + assert session1 != session2 def test_common_billing_account_path(): diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_autoscalers.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_autoscalers.py index c7b6c77a3f96..2a92700183a3 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_autoscalers.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_autoscalers.py @@ -4365,6 +4365,7 @@ def test_insert_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", @@ -4771,6 +4772,7 @@ def 
test_patch_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", @@ -5258,6 +5260,7 @@ def test_update_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_forwarding_rules.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_forwarding_rules.py index 80e2eb9225e9..ab02704e22d7 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_forwarding_rules.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_forwarding_rules.py @@ -1917,6 +1917,8 @@ def test_get_rest_required_fields(request_type=compute.GetForwardingRuleRequest) unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).get._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -1976,7 +1978,7 @@ def test_get_rest_unset_required_fields(): unset_fields = transport.get._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) + set(("view",)) & set( ( "forwardingRule", @@ -4696,13 +4698,16 @@ def test_get_rest_call_success(request_type): # Designate an appropriate value for the returned response. 
return_value = compute.ForwardingRule( I_p_address="I_p_address_value", + I_p_addresses=["I_p_addresses_value"], I_p_protocol="I_p_protocol_value", all_ports=True, allow_global_access=True, allow_psc_global_access=True, allow_psc_packet_injection=True, + availability_group="availability_group_value", backend_service="backend_service_value", base_forwarding_rule="base_forwarding_rule_value", + child_forwarding_rules=["child_forwarding_rules_value"], creation_timestamp="creation_timestamp_value", description="description_value", external_managed_backend_bucket_migration_state="external_managed_backend_bucket_migration_state_value", @@ -4719,6 +4724,7 @@ def test_get_rest_call_success(request_type): network="network_value", network_tier="network_tier_value", no_automate_dns_zone=True, + parent_forwarding_rule="parent_forwarding_rule_value", port_range="port_range_value", ports=["ports_value"], psc_connection_id=1793, @@ -4748,13 +4754,16 @@ def test_get_rest_call_success(request_type): # Establish that the response is the type that we expect. 
assert isinstance(response, compute.ForwardingRule) assert response.I_p_address == "I_p_address_value" + assert response.I_p_addresses == ["I_p_addresses_value"] assert response.I_p_protocol == "I_p_protocol_value" assert response.all_ports is True assert response.allow_global_access is True assert response.allow_psc_global_access is True assert response.allow_psc_packet_injection is True + assert response.availability_group == "availability_group_value" assert response.backend_service == "backend_service_value" assert response.base_forwarding_rule == "base_forwarding_rule_value" + assert response.child_forwarding_rules == ["child_forwarding_rules_value"] assert response.creation_timestamp == "creation_timestamp_value" assert response.description == "description_value" assert ( @@ -4778,6 +4787,7 @@ def test_get_rest_call_success(request_type): assert response.network == "network_value" assert response.network_tier == "network_tier_value" assert response.no_automate_dns_zone is True + assert response.parent_forwarding_rule == "parent_forwarding_rule_value" assert response.port_range == "port_range_value" assert response.ports == ["ports_value"] assert response.psc_connection_id == 1793 @@ -4894,13 +4904,20 @@ def test_insert_rest_call_success(request_type): request_init = {"project": "sample1", "region": "sample2"} request_init["forwarding_rule_resource"] = { "I_p_address": "I_p_address_value", + "I_p_addresses": ["I_p_addresses_value1", "I_p_addresses_value2"], "I_p_protocol": "I_p_protocol_value", "all_ports": True, "allow_global_access": True, "allow_psc_global_access": True, "allow_psc_packet_injection": True, + "attached_extensions": [{"reference": "reference_value"}], + "availability_group": "availability_group_value", "backend_service": "backend_service_value", "base_forwarding_rule": "base_forwarding_rule_value", + "child_forwarding_rules": [ + "child_forwarding_rules_value1", + "child_forwarding_rules_value2", + ], "creation_timestamp": 
"creation_timestamp_value", "description": "description_value", "external_managed_backend_bucket_migration_state": "external_managed_backend_bucket_migration_state_value", @@ -4924,6 +4941,7 @@ def test_insert_rest_call_success(request_type): "network": "network_value", "network_tier": "network_tier_value", "no_automate_dns_zone": True, + "parent_forwarding_rule": "parent_forwarding_rule_value", "port_range": "port_range_value", "ports": ["ports_value1", "ports_value2"], "psc_connection_id": 1793, @@ -5327,13 +5345,20 @@ def test_patch_rest_call_success(request_type): } request_init["forwarding_rule_resource"] = { "I_p_address": "I_p_address_value", + "I_p_addresses": ["I_p_addresses_value1", "I_p_addresses_value2"], "I_p_protocol": "I_p_protocol_value", "all_ports": True, "allow_global_access": True, "allow_psc_global_access": True, "allow_psc_packet_injection": True, + "attached_extensions": [{"reference": "reference_value"}], + "availability_group": "availability_group_value", "backend_service": "backend_service_value", "base_forwarding_rule": "base_forwarding_rule_value", + "child_forwarding_rules": [ + "child_forwarding_rules_value1", + "child_forwarding_rules_value2", + ], "creation_timestamp": "creation_timestamp_value", "description": "description_value", "external_managed_backend_bucket_migration_state": "external_managed_backend_bucket_migration_state_value", @@ -5357,6 +5382,7 @@ def test_patch_rest_call_success(request_type): "network": "network_value", "network_tier": "network_tier_value", "no_automate_dns_zone": True, + "parent_forwarding_rule": "parent_forwarding_rule_value", "port_range": "port_range_value", "ports": ["ports_value1", "ports_value2"], "psc_connection_id": 1793, diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_forwarding_rules.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_forwarding_rules.py index 3baa9c8e1f4c..9fbd8746c3a3 100644 --- 
a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_forwarding_rules.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_forwarding_rules.py @@ -1647,6 +1647,8 @@ def test_get_rest_required_fields(request_type=compute.GetGlobalForwardingRuleRe unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).get._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -1704,7 +1706,7 @@ def test_get_rest_unset_required_fields(): unset_fields = transport.get._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) + set(("view",)) & set( ( "forwardingRule", @@ -4159,13 +4161,16 @@ def test_get_rest_call_success(request_type): # Designate an appropriate value for the returned response. 
return_value = compute.ForwardingRule( I_p_address="I_p_address_value", + I_p_addresses=["I_p_addresses_value"], I_p_protocol="I_p_protocol_value", all_ports=True, allow_global_access=True, allow_psc_global_access=True, allow_psc_packet_injection=True, + availability_group="availability_group_value", backend_service="backend_service_value", base_forwarding_rule="base_forwarding_rule_value", + child_forwarding_rules=["child_forwarding_rules_value"], creation_timestamp="creation_timestamp_value", description="description_value", external_managed_backend_bucket_migration_state="external_managed_backend_bucket_migration_state_value", @@ -4182,6 +4187,7 @@ def test_get_rest_call_success(request_type): network="network_value", network_tier="network_tier_value", no_automate_dns_zone=True, + parent_forwarding_rule="parent_forwarding_rule_value", port_range="port_range_value", ports=["ports_value"], psc_connection_id=1793, @@ -4211,13 +4217,16 @@ def test_get_rest_call_success(request_type): # Establish that the response is the type that we expect. 
assert isinstance(response, compute.ForwardingRule) assert response.I_p_address == "I_p_address_value" + assert response.I_p_addresses == ["I_p_addresses_value"] assert response.I_p_protocol == "I_p_protocol_value" assert response.all_ports is True assert response.allow_global_access is True assert response.allow_psc_global_access is True assert response.allow_psc_packet_injection is True + assert response.availability_group == "availability_group_value" assert response.backend_service == "backend_service_value" assert response.base_forwarding_rule == "base_forwarding_rule_value" + assert response.child_forwarding_rules == ["child_forwarding_rules_value"] assert response.creation_timestamp == "creation_timestamp_value" assert response.description == "description_value" assert ( @@ -4241,6 +4250,7 @@ def test_get_rest_call_success(request_type): assert response.network == "network_value" assert response.network_tier == "network_tier_value" assert response.no_automate_dns_zone is True + assert response.parent_forwarding_rule == "parent_forwarding_rule_value" assert response.port_range == "port_range_value" assert response.ports == ["ports_value"] assert response.psc_connection_id == 1793 @@ -4361,13 +4371,20 @@ def test_insert_rest_call_success(request_type): request_init = {"project": "sample1"} request_init["forwarding_rule_resource"] = { "I_p_address": "I_p_address_value", + "I_p_addresses": ["I_p_addresses_value1", "I_p_addresses_value2"], "I_p_protocol": "I_p_protocol_value", "all_ports": True, "allow_global_access": True, "allow_psc_global_access": True, "allow_psc_packet_injection": True, + "attached_extensions": [{"reference": "reference_value"}], + "availability_group": "availability_group_value", "backend_service": "backend_service_value", "base_forwarding_rule": "base_forwarding_rule_value", + "child_forwarding_rules": [ + "child_forwarding_rules_value1", + "child_forwarding_rules_value2", + ], "creation_timestamp": "creation_timestamp_value", 
"description": "description_value", "external_managed_backend_bucket_migration_state": "external_managed_backend_bucket_migration_state_value", @@ -4391,6 +4408,7 @@ def test_insert_rest_call_success(request_type): "network": "network_value", "network_tier": "network_tier_value", "no_automate_dns_zone": True, + "parent_forwarding_rule": "parent_forwarding_rule_value", "port_range": "port_range_value", "ports": ["ports_value1", "ports_value2"], "psc_connection_id": 1793, @@ -4788,13 +4806,20 @@ def test_patch_rest_call_success(request_type): request_init = {"project": "sample1", "forwarding_rule": "sample2"} request_init["forwarding_rule_resource"] = { "I_p_address": "I_p_address_value", + "I_p_addresses": ["I_p_addresses_value1", "I_p_addresses_value2"], "I_p_protocol": "I_p_protocol_value", "all_ports": True, "allow_global_access": True, "allow_psc_global_access": True, "allow_psc_packet_injection": True, + "attached_extensions": [{"reference": "reference_value"}], + "availability_group": "availability_group_value", "backend_service": "backend_service_value", "base_forwarding_rule": "base_forwarding_rule_value", + "child_forwarding_rules": [ + "child_forwarding_rules_value1", + "child_forwarding_rules_value2", + ], "creation_timestamp": "creation_timestamp_value", "description": "description_value", "external_managed_backend_bucket_migration_state": "external_managed_backend_bucket_migration_state_value", @@ -4818,6 +4843,7 @@ def test_patch_rest_call_success(request_type): "network": "network_value", "network_tier": "network_tier_value", "no_automate_dns_zone": True, + "parent_forwarding_rule": "parent_forwarding_rule_value", "port_range": "port_range_value", "ports": ["ports_value1", "ports_value2"], "psc_connection_id": 1793, diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_public_delegated_prefixes.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_public_delegated_prefixes.py 
index b3117e0e74a9..44056f95e300 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_public_delegated_prefixes.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_global_public_delegated_prefixes.py @@ -3215,6 +3215,7 @@ def test_get_rest_call_success(request_type): mode="mode_value", name="name_value", parent_prefix="parent_prefix_value", + purpose="purpose_value", region="region_value", self_link="self_link_value", status="status_value", @@ -3248,6 +3249,7 @@ def test_get_rest_call_success(request_type): assert response.mode == "mode_value" assert response.name == "name_value" assert response.parent_prefix == "parent_prefix_value" + assert response.purpose == "purpose_value" assert response.region == "region_value" assert response.self_link == "self_link_value" assert response.status == "status_value" @@ -3386,10 +3388,12 @@ def test_insert_rest_call_success(request_type): "is_address": True, "mode": "mode_value", "name": "name_value", + "purpose": "purpose_value", "region": "region_value", "status": "status_value", } ], + "purpose": "purpose_value", "region": "region_value", "self_link": "self_link_value", "status": "status_value", @@ -3807,10 +3811,12 @@ def test_patch_rest_call_success(request_type): "is_address": True, "mode": "mode_value", "name": "name_value", + "purpose": "purpose_value", "region": "region_value", "status": "status_value", } ], + "purpose": "purpose_value", "region": "region_value", "self_link": "self_link_value", "status": "status_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_group_managers.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_group_managers.py index dafcc52cadc2..20bc64229e5e 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_group_managers.py +++ 
b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_group_managers.py @@ -3304,7 +3304,12 @@ def test_delete_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3364,7 +3369,12 @@ def test_delete_rest_unset_required_fields(): unset_fields = transport.delete._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3513,7 +3523,12 @@ def test_delete_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3573,7 +3588,12 @@ def test_delete_unary_rest_unset_required_fields(): unset_fields = transport.delete._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3724,7 +3744,12 @@ def test_delete_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3785,7 +3810,12 @@ def test_delete_instances_rest_unset_required_fields(): unset_fields = transport.delete_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3943,7 +3973,12 @@ def test_delete_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -4004,7 +4039,12 @@ def test_delete_instances_unary_rest_unset_required_fields(): unset_fields = transport.delete_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -7516,7 +7556,12 @@ def test_recreate_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).recreate_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -7577,7 +7622,12 @@ def test_recreate_instances_rest_unset_required_fields(): unset_fields = transport.recreate_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -7737,7 +7787,12 @@ def test_recreate_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).recreate_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -7798,7 +7853,12 @@ def test_recreate_instances_unary_rest_unset_required_fields(): unset_fields = transport.recreate_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -11070,7 +11130,12 @@ def test_stop_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).stop_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -11131,7 +11196,12 @@ def test_stop_instances_rest_unset_required_fields(): unset_fields = transport.stop_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -11287,7 +11357,12 @@ def test_stop_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).stop_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -11348,7 +11423,12 @@ def test_stop_instances_unary_rest_unset_required_fields(): unset_fields = transport.stop_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_templates.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_templates.py index 95a6d380fd7c..b7fe4d5523b3 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_templates.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instance_templates.py @@ -4129,6 +4129,7 @@ def test_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, 
"fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instances.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instances.py index 905d564d9719..afc29b2c014f 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instances.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_instances.py @@ -23263,6 +23263,7 @@ def test_add_network_interface_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", @@ -24338,6 +24339,7 @@ def test_bulk_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", @@ -26718,6 +26720,7 @@ def test_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", @@ -33705,6 +33708,7 @@ def test_update_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", @@ -34598,6 +34602,7 @@ def test_update_network_interface_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_license_codes.py 
b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_license_codes.py index f89c2c21e940..6a2082b21a70 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_license_codes.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_license_codes.py @@ -1314,6 +1314,400 @@ def test_get_rest_flattened_error(transport: str = "rest"): ) +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_iam_policy_rest_required_fields( + request_type=compute.GetIamPolicyLicenseCodeRequest, +): + transport_class = transports.LicenseCodesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("options_requested_policy_version",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.Policy.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_iam_policy(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("optionsRequestedPolicyVersion",)) + & set( + ( + "project", + "resource", + ) + ) + ) + + +def test_get_iam_policy_rest_flattened(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = compute.Policy.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/beta/projects/{project}/global/licenseCodes/{resource}/getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyLicenseCodeRequest(), + project="project_value", + resource="resource_value", + ) + + +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_set_iam_policy_rest_required_fields( + request_type=compute.SetIamPolicyLicenseCodeRequest, +): + transport_class = transports.LicenseCodesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.Policy.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.set_iam_policy(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "globalSetPolicyRequestResource", + "project", + "resource", + ) + ) + ) + + +def test_set_iam_policy_rest_flattened(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + resource="resource_value", + global_set_policy_request_resource=compute.GlobalSetPolicyRequest( + bindings=[compute.Binding(binding_id="binding_id_value")] + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = compute.Policy.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/beta/projects/{project}/global/licenseCodes/{resource}/setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyLicenseCodeRequest(), + project="project_value", + resource="resource_value", + global_set_policy_request_resource=compute.GlobalSetPolicyRequest( + bindings=[compute.Binding(binding_id="binding_id_value")] + ), + ) + + def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -1539,73 +1933,367 @@ def test_credentials_transport_error(): transport=transport, ) - # It is an error to provide an api_key and a transport instance. - transport = transports.LicenseCodesRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = LicenseCodesClient( - client_options=options, - transport=transport, + # It is an error to provide an api_key and a transport instance. + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = LicenseCodesClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = LicenseCodesClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicenseCodesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = LicenseCodesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LicenseCodesRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_rest(): + transport = LicenseCodesClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_get_rest_bad_request(request_type=compute.GetLicenseCodeRequest): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_code": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get(request) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.GetLicenseCodeRequest, + dict, + ], +) +def test_get_rest_call_success(request_type): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_code": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.LicenseCode( + allowed_replacement_licenses=["allowed_replacement_licenses_value"], + appendable_to_disk=True, + creation_timestamp="creation_timestamp_value", + description="description_value", + id=205, + incompatible_licenses=["incompatible_licenses_value"], + kind="kind_value", + multi_tenant_only=True, + name="name_value", + os_license=True, + removable_from_disk=True, + required_coattached_licenses=["required_coattached_licenses_value"], + self_link="self_link_value", + sole_tenant_only=True, + state="state_value", + transferable=True, + update_timestamp="update_timestamp_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = compute.LicenseCode.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.LicenseCode) + assert response.allowed_replacement_licenses == [ + "allowed_replacement_licenses_value" + ] + assert response.appendable_to_disk is True + assert response.creation_timestamp == "creation_timestamp_value" + assert response.description == "description_value" + assert response.id == 205 + assert response.incompatible_licenses == ["incompatible_licenses_value"] + assert response.kind == "kind_value" + assert response.multi_tenant_only is True + assert response.name == "name_value" + assert response.os_license is True + assert response.removable_from_disk is True + assert response.required_coattached_licenses == [ + "required_coattached_licenses_value" + ] + assert response.self_link == "self_link_value" + assert response.sole_tenant_only is True + assert response.state == "state_value" + assert response.transferable is True + assert response.update_timestamp == "update_timestamp_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_rest_interceptors(null_interceptor): + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.LicenseCodesRestInterceptor(), + ) + client = LicenseCodesClient(transport=transport) + + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object(transports.LicenseCodesRestInterceptor, "post_get") as post, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "post_get_with_metadata" + ) as post_with_metadata, + mock.patch.object(transports.LicenseCodesRestInterceptor, "pre_get") as pre, + ): + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = compute.GetLicenseCodeRequest.pb(compute.GetLicenseCodeRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": 
pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = compute.LicenseCode.to_json(compute.LicenseCode()) + req.return_value.content = return_value + + request = compute.GetLicenseCodeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.LicenseCode() + post_with_metadata.return_value = compute.LicenseCode(), metadata + + client.get( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_iam_policy_rest_bad_request( + request_type=compute.GetIamPolicyLicenseCodeRequest, +): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.GetIamPolicyLicenseCodeRequest, + dict, + ], +) +def test_get_iam_policy_rest_call_success(request_type): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag="etag_value", + iam_owned=True, + version=774, ) - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = LicenseCodesClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 - # It is an error to provide scopes and a transport instance. 
- transport = transports.LicenseCodesRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = LicenseCodesClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + # Convert return value to protobuf type + return_value = compute.Policy.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_iam_policy(request) + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == "etag_value" + assert response.iam_owned is True + assert response.version == 774 -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.LicenseCodesRestTransport( credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.LicenseCodesRestInterceptor(), ) client = LicenseCodesClient(transport=transport) - assert client.transport is transport + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "post_get_iam_policy" + ) as post, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "post_get_iam_policy_with_metadata" + ) as post_with_metadata, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "pre_get_iam_policy" + ) as pre, + ): + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = compute.GetIamPolicyLicenseCodeRequest.pb( + compute.GetIamPolicyLicenseCodeRequest() + ) + 
transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = compute.Policy.to_json(compute.Policy()) + req.return_value.content = return_value -@pytest.mark.parametrize( - "transport_class", - [ - transports.LicenseCodesRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + request = compute.GetIamPolicyLicenseCodeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.Policy() + post_with_metadata.return_value = compute.Policy(), metadata + client.get_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) -def test_transport_kind_rest(): - transport = LicenseCodesClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_rest_bad_request(request_type=compute.GetLicenseCodeRequest): +def test_set_iam_policy_rest_bad_request( + request_type=compute.SetIamPolicyLicenseCodeRequest, +): client = LicenseCodesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"project": "sample1", "license_code": "sample2"} + request_init = {"project": "sample1", "resource": "sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -1621,37 +2309,148 @@ def test_get_rest_bad_request(request_type=compute.GetLicenseCodeRequest): response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get(request) + client.set_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - compute.GetLicenseCodeRequest, + compute.SetIamPolicyLicenseCodeRequest, dict, ], ) -def test_get_rest_call_success(request_type): +def test_set_iam_policy_rest_call_success(request_type): client = LicenseCodesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"project": "sample1", "license_code": "sample2"} + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = { + "bindings": [ + { + "binding_id": "binding_id_value", + "condition": { + "description": "description_value", + "expression": "expression_value", + "location": "location_value", + "title": "title_value", + }, + "members": ["members_value1", "members_value2"], + "role": "role_value", + } + ], + "etag": "etag_value", + "policy": { + "audit_configs": [ + { + "audit_log_configs": [ + { + "exempted_members": [ + "exempted_members_value1", + "exempted_members_value2", + ], + "ignore_child_exemptions": True, + "log_type": "log_type_value", + } + ], + "exempted_members": [ + "exempted_members_value1", + "exempted_members_value2", + ], + "service": "service_value", + } + ], + "bindings": {}, + "etag": "etag_value", + "iam_owned": True, + "version": 774, + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = compute.SetIamPolicyLicenseCodeRequest.meta.fields[ + "global_set_policy_request_resource" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "global_set_policy_request_resource" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request 
which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["global_set_policy_request_resource"][field]) + ): + del request_init["global_set_policy_request_resource"][field][i][ + subfield + ] + else: + del request_init["global_set_policy_request_resource"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = compute.LicenseCode( - creation_timestamp="creation_timestamp_value", - description="description_value", - id=205, - kind="kind_value", - name="name_value", - self_link="self_link_value", - state="state_value", - transferable=True, + return_value = compute.Policy( + etag="etag_value", + iam_owned=True, + version=774, ) # Wrap the value into a proper Response obj @@ -1659,27 +2458,22 @@ def test_get_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = compute.LicenseCode.pb(return_value) + return_value = compute.Policy.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get(request) + response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, compute.LicenseCode) - assert response.creation_timestamp == "creation_timestamp_value" - assert response.description == "description_value" - assert response.id == 205 - assert response.kind == "kind_value" - assert response.name == "name_value" - assert response.self_link == "self_link_value" - assert response.state == "state_value" - assert response.transferable is True + assert isinstance(response, compute.Policy) + assert response.etag == "etag_value" + assert response.iam_owned is True + assert response.version == 774 @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_rest_interceptors(null_interceptor): +def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.LicenseCodesRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -1691,16 +2485,22 @@ def test_get_rest_interceptors(null_interceptor): with ( mock.patch.object(type(client.transport._session), "request") as req, mock.patch.object(path_template, "transcode") as transcode, - mock.patch.object(transports.LicenseCodesRestInterceptor, "post_get") as post, mock.patch.object( - transports.LicenseCodesRestInterceptor, "post_get_with_metadata" + transports.LicenseCodesRestInterceptor, "post_set_iam_policy" + ) as post, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "post_set_iam_policy_with_metadata" ) as post_with_metadata, - mock.patch.object(transports.LicenseCodesRestInterceptor, "pre_get") as pre, + mock.patch.object( + transports.LicenseCodesRestInterceptor, "pre_set_iam_policy" + ) as pre, ): pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = compute.GetLicenseCodeRequest.pb(compute.GetLicenseCodeRequest()) + pb_message = compute.SetIamPolicyLicenseCodeRequest.pb( + compute.SetIamPolicyLicenseCodeRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -1711,19 +2511,19 @@ def 
test_get_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = compute.LicenseCode.to_json(compute.LicenseCode()) + return_value = compute.Policy.to_json(compute.Policy()) req.return_value.content = return_value - request = compute.GetLicenseCodeRequest() + request = compute.SetIamPolicyLicenseCodeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = compute.LicenseCode() - post_with_metadata.return_value = compute.LicenseCode(), metadata + post.return_value = compute.Policy() + post_with_metadata.return_value = compute.Policy(), metadata - client.get( + client.set_iam_policy( request, metadata=[ ("key", "val"), @@ -1974,6 +2774,46 @@ def test_get_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_rest(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = compute.GetIamPolicyLicenseCodeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_rest(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = compute.SetIamPolicyLicenseCodeRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_test_iam_permissions_empty_call_rest(): @@ -2019,6 +2859,8 @@ def test_license_codes_base_transport(): # raise NotImplementedError. methods = ( "get", + "get_iam_policy", + "set_iam_policy", "test_iam_permissions", ) for method in methods: @@ -2057,7 +2899,6 @@ def test_license_codes_base_transport_with_credentials_file(): "credentials.json", scopes=None, default_scopes=( - "https://www.googleapis.com/auth/compute.readonly", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), @@ -2087,7 +2928,6 @@ def test_license_codes_auth_adc(): adc.assert_called_once_with( scopes=None, default_scopes=( - "https://www.googleapis.com/auth/compute.readonly", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), @@ -2168,6 +3008,12 @@ def test_license_codes_client_transport_session_collision(transport_name): session1 = client1.transport.get._session session2 = client2.transport.get._session assert session1 != session2 + session1 = client1.transport.get_iam_policy._session + session2 = client2.transport.get_iam_policy._session + assert session1 != session2 + session1 = client1.transport.set_iam_policy._session + session2 = client2.transport.set_iam_policy._session + assert session1 != session2 session1 = client1.transport.test_iam_permissions._session session2 = client2.transport.test_iam_permissions._session assert session1 != session2 diff --git 
a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_machine_images.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_machine_images.py index 39e2198335bd..be181a757069 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_machine_images.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_machine_images.py @@ -4099,6 +4099,7 @@ def test_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_public_delegated_prefixes.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_public_delegated_prefixes.py index 7086ce222d3f..ad15e3f8cac6 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_public_delegated_prefixes.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_public_delegated_prefixes.py @@ -4698,6 +4698,7 @@ def test_get_rest_call_success(request_type): mode="mode_value", name="name_value", parent_prefix="parent_prefix_value", + purpose="purpose_value", region="region_value", self_link="self_link_value", status="status_value", @@ -4731,6 +4732,7 @@ def test_get_rest_call_success(request_type): assert response.mode == "mode_value" assert response.name == "name_value" assert response.parent_prefix == "parent_prefix_value" + assert response.purpose == "purpose_value" assert response.region == "region_value" assert response.self_link == "self_link_value" assert response.status == "status_value" @@ -4868,10 +4870,12 @@ def test_insert_rest_call_success(request_type): "is_address": True, "mode": "mode_value", "name": "name_value", + "purpose": "purpose_value", "region": "region_value", "status": "status_value", } 
], + "purpose": "purpose_value", "region": "region_value", "self_link": "self_link_value", "status": "status_value", @@ -5294,10 +5298,12 @@ def test_patch_rest_call_success(request_type): "is_address": True, "mode": "mode_value", "name": "name_value", + "purpose": "purpose_value", "region": "region_value", "status": "status_value", } ], + "purpose": "purpose_value", "region": "region_value", "self_link": "self_link_value", "status": "status_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_autoscalers.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_autoscalers.py index 10b650fb68b3..79b88523ad8a 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_autoscalers.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_autoscalers.py @@ -4017,6 +4017,7 @@ def test_insert_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", @@ -4437,6 +4438,7 @@ def test_patch_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", @@ -4930,6 +4932,7 @@ def test_update_rest_call_success(request_type): }, "scale_in_control": {"max_scaled_in_replicas": {}, "time_window_sec": 1600}, "scaling_schedules": {}, + "stabilization_period_sec": 2553, }, "creation_timestamp": "creation_timestamp_value", "description": "description_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_group_managers.py 
b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_group_managers.py index 5f3394bbdc93..a7b792bad66a 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_group_managers.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_group_managers.py @@ -3049,7 +3049,12 @@ def test_delete_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3109,7 +3114,12 @@ def test_delete_rest_unset_required_fields(): unset_fields = transport.delete._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3258,7 +3268,12 @@ def test_delete_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3318,7 +3333,12 @@ def test_delete_unary_rest_unset_required_fields(): unset_fields = transport.delete._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3469,7 +3489,12 @@ def test_delete_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3530,7 +3555,12 @@ def test_delete_instances_rest_unset_required_fields(): unset_fields = transport.delete_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -3688,7 +3718,12 @@ def test_delete_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -3749,7 +3784,12 @@ def test_delete_instances_unary_rest_unset_required_fields(): unset_fields = transport.delete_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -7048,7 +7088,12 @@ def test_recreate_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).recreate_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -7109,7 +7154,12 @@ def test_recreate_instances_rest_unset_required_fields(): unset_fields = transport.recreate_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -7269,7 +7319,12 @@ def test_recreate_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).recreate_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -7330,7 +7385,12 @@ def test_recreate_instances_unary_rest_unset_required_fields(): unset_fields = transport.recreate_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -10602,7 +10662,12 @@ def test_stop_instances_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).stop_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -10663,7 +10728,12 @@ def test_stop_instances_rest_unset_required_fields(): unset_fields = transport.stop_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", @@ -10819,7 +10889,12 @@ def test_stop_instances_unary_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).stop_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("request_id",)) + assert not set(unset_fields) - set( + ( + "no_graceful_shutdown", + "request_id", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -10880,7 +10955,12 @@ def test_stop_instances_unary_rest_unset_required_fields(): unset_fields = transport.stop_instances._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("requestId",)) + set( + ( + "noGracefulShutdown", + "requestId", + ) + ) & set( ( "instanceGroupManager", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_templates.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_templates.py index 2788d84af6f9..67e758592f38 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_templates.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instance_templates.py @@ -3079,6 +3079,7 @@ def test_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instances.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instances.py index 08c0c9af9812..4a2b9aa771d0 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instances.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_instances.py @@ -1814,6 +1814,7 @@ def test_bulk_insert_rest_call_success(request_type): "subnetwork_range_name": "subnetwork_range_name_value", } ], + "alias_ipv6_ranges": {}, "enable_vpc_scoped_dns": True, "fingerprint": "fingerprint_value", "igmp_query": "igmp_query_value", diff 
--git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_ssl_policies.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_ssl_policies.py index f435322363e7..fbbbd6f40e78 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_ssl_policies.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_region_ssl_policies.py @@ -3637,6 +3637,7 @@ def test_get_rest_call_success(request_type): kind="kind_value", min_tls_version="min_tls_version_value", name="name_value", + post_quantum_key_exchange="post_quantum_key_exchange_value", profile="profile_value", region="region_value", self_link="self_link_value", @@ -3665,6 +3666,7 @@ def test_get_rest_call_success(request_type): assert response.kind == "kind_value" assert response.min_tls_version == "min_tls_version_value" assert response.name == "name_value" + assert response.post_quantum_key_exchange == "post_quantum_key_exchange_value" assert response.profile == "profile_value" assert response.region == "region_value" assert response.self_link == "self_link_value" @@ -3782,6 +3784,7 @@ def test_insert_rest_call_success(request_type): "kind": "kind_value", "min_tls_version": "min_tls_version_value", "name": "name_value", + "post_quantum_key_exchange": "post_quantum_key_exchange_value", "profile": "profile_value", "region": "region_value", "self_link": "self_link_value", @@ -4310,6 +4313,7 @@ def test_patch_rest_call_success(request_type): "kind": "kind_value", "min_tls_version": "min_tls_version_value", "name": "name_value", + "post_quantum_key_exchange": "post_quantum_key_exchange_value", "profile": "profile_value", "region": "region_value", "self_link": "self_link_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_ssl_policies.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_ssl_policies.py index 816c1255c2d7..bcd42371991f 
100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_ssl_policies.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_ssl_policies.py @@ -3891,6 +3891,7 @@ def test_get_rest_call_success(request_type): kind="kind_value", min_tls_version="min_tls_version_value", name="name_value", + post_quantum_key_exchange="post_quantum_key_exchange_value", profile="profile_value", region="region_value", self_link="self_link_value", @@ -3919,6 +3920,7 @@ def test_get_rest_call_success(request_type): assert response.kind == "kind_value" assert response.min_tls_version == "min_tls_version_value" assert response.name == "name_value" + assert response.post_quantum_key_exchange == "post_quantum_key_exchange_value" assert response.profile == "profile_value" assert response.region == "region_value" assert response.self_link == "self_link_value" @@ -4030,6 +4032,7 @@ def test_insert_rest_call_success(request_type): "kind": "kind_value", "min_tls_version": "min_tls_version_value", "name": "name_value", + "post_quantum_key_exchange": "post_quantum_key_exchange_value", "profile": "profile_value", "region": "region_value", "self_link": "self_link_value", @@ -4546,6 +4549,7 @@ def test_patch_rest_call_success(request_type): "kind": "kind_value", "min_tls_version": "min_tls_version_value", "name": "name_value", + "post_quantum_key_exchange": "post_quantum_key_exchange_value", "profile": "profile_value", "region": "region_value", "self_link": "self_link_value", diff --git a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_subnetworks.py b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_subnetworks.py index 97021e979925..231875b0e43a 100644 --- a/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_subnetworks.py +++ b/packages/google-cloud-compute-v1beta/tests/unit/gapic/compute_v1beta/test_subnetworks.py @@ -5974,6 +5974,8 @@ def 
test_insert_rest_call_success(request_type): "secondary_ip_ranges": [ { "ip_cidr_range": "ip_cidr_range_value", + "ip_collection": "ip_collection_value", + "ip_version": "ip_version_value", "range_name": "range_name_value", "reserved_internal_range": "reserved_internal_range_value", } @@ -6546,6 +6548,8 @@ def test_patch_rest_call_success(request_type): "secondary_ip_ranges": [ { "ip_cidr_range": "ip_cidr_range_value", + "ip_collection": "ip_collection_value", + "ip_version": "ip_version_value", "range_name": "range_name_value", "reserved_internal_range": "reserved_internal_range_value", } diff --git a/packages/google-cloud-container/google/cloud/container/__init__.py b/packages/google-cloud-container/google/cloud/container/__init__.py index 5cc5a824f54b..f224ab6a676a 100644 --- a/packages/google-cloud-container/google/cloud/container/__init__.py +++ b/packages/google-cloud-container/google/cloud/container/__init__.py @@ -52,6 +52,7 @@ CloudRunConfig, Cluster, ClusterAutoscaling, + ClusterPolicyConfig, ClusterUpdate, ClusterUpgradeInfo, CompleteIPRotationRequest, @@ -60,6 +61,7 @@ ConfidentialNodes, ConfigConnectorConfig, ContainerdConfig, + ControlPlaneEgress, ControlPlaneEndpointsConfig, CostManagementConfig, CreateClusterRequest, @@ -73,6 +75,7 @@ DeleteNodePoolRequest, DesiredAdditionalIPRangesConfig, DesiredEnterpriseConfig, + DisruptionBudget, DisruptionEvent, DnsCacheConfig, DNSConfig, @@ -133,6 +136,7 @@ MaintenanceExclusionOptions, MaintenancePolicy, MaintenanceWindow, + ManagedMachineLearningDiagnosticsConfig, ManagedOpenTelemetryConfig, ManagedPrometheusConfig, MasterAuth, @@ -149,6 +153,7 @@ NetworkTierConfig, NodeConfig, NodeConfigDefaults, + NodeCreationConfig, NodeKubeletConfig, NodeLabels, NodeManagement, @@ -160,6 +165,7 @@ NodePoolLoggingConfig, NodePoolUpdateStrategy, NodePoolUpgradeInfo, + NodeReadinessConfig, NodeTaint, NodeTaints, NotificationConfig, @@ -168,6 +174,7 @@ ParallelstoreCsiDriverConfig, PodAutoscaling, PodCIDROverprovisionConfig, 
+ PodSnapshotConfig, PrivateClusterConfig, PrivateClusterMasterGlobalAccessConfig, PrivateIPv6GoogleAccess, @@ -177,6 +184,7 @@ RayClusterMonitoringConfig, RayOperatorConfig, RBACBindingConfig, + RecurringMaintenanceWindow, RecurringTimeWindow, ReleaseChannel, ReservationAffinity, @@ -186,9 +194,11 @@ ResourceUsageExportConfig, RollbackNodePoolUpgradeRequest, SandboxConfig, + ScheduleUpgradeConfig, SecondaryBootDisk, SecondaryBootDiskUpdateStrategy, SecretManagerConfig, + SecretSyncConfig, SecurityBulletinEvent, SecurityPostureConfig, ServerConfig, @@ -208,11 +218,13 @@ ShieldedInstanceConfig, ShieldedNodes, SliceControllerConfig, + SlurmOperatorConfig, SoleTenantConfig, StackType, StartIPRotationRequest, StatefulHAConfig, StatusCondition, + TaintConfig, TimeWindow, TopologyManager, UpdateClusterRequest, @@ -264,6 +276,7 @@ "CloudRunConfig", "Cluster", "ClusterAutoscaling", + "ClusterPolicyConfig", "ClusterUpdate", "ClusterUpgradeInfo", "CompleteIPRotationRequest", @@ -272,6 +285,7 @@ "ConfidentialNodes", "ConfigConnectorConfig", "ContainerdConfig", + "ControlPlaneEgress", "ControlPlaneEndpointsConfig", "CostManagementConfig", "CreateClusterRequest", @@ -284,6 +298,7 @@ "DeleteNodePoolRequest", "DesiredAdditionalIPRangesConfig", "DesiredEnterpriseConfig", + "DisruptionBudget", "DisruptionEvent", "DnsCacheConfig", "DNSConfig", @@ -342,6 +357,7 @@ "MaintenanceExclusionOptions", "MaintenancePolicy", "MaintenanceWindow", + "ManagedMachineLearningDiagnosticsConfig", "ManagedOpenTelemetryConfig", "ManagedPrometheusConfig", "MasterAuth", @@ -358,6 +374,7 @@ "NetworkTierConfig", "NodeConfig", "NodeConfigDefaults", + "NodeCreationConfig", "NodeKubeletConfig", "NodeLabels", "NodeManagement", @@ -368,6 +385,7 @@ "NodePoolDefaults", "NodePoolLoggingConfig", "NodePoolUpgradeInfo", + "NodeReadinessConfig", "NodeTaint", "NodeTaints", "NotificationConfig", @@ -376,6 +394,7 @@ "ParallelstoreCsiDriverConfig", "PodAutoscaling", "PodCIDROverprovisionConfig", + "PodSnapshotConfig", 
"PrivateClusterConfig", "PrivateClusterMasterGlobalAccessConfig", "PrivilegedAdmissionConfig", @@ -384,6 +403,7 @@ "RayClusterMonitoringConfig", "RayOperatorConfig", "RBACBindingConfig", + "RecurringMaintenanceWindow", "RecurringTimeWindow", "ReleaseChannel", "ReservationAffinity", @@ -393,9 +413,11 @@ "ResourceUsageExportConfig", "RollbackNodePoolUpgradeRequest", "SandboxConfig", + "ScheduleUpgradeConfig", "SecondaryBootDisk", "SecondaryBootDiskUpdateStrategy", "SecretManagerConfig", + "SecretSyncConfig", "SecurityBulletinEvent", "SecurityPostureConfig", "ServerConfig", @@ -415,10 +437,12 @@ "ShieldedInstanceConfig", "ShieldedNodes", "SliceControllerConfig", + "SlurmOperatorConfig", "SoleTenantConfig", "StartIPRotationRequest", "StatefulHAConfig", "StatusCondition", + "TaintConfig", "TimeWindow", "TopologyManager", "UpdateClusterRequest", diff --git a/packages/google-cloud-container/google/cloud/container_v1/__init__.py b/packages/google-cloud-container/google/cloud/container_v1/__init__.py index e33e528e5571..4543b524b21b 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/__init__.py +++ b/packages/google-cloud-container/google/cloud/container_v1/__init__.py @@ -52,6 +52,7 @@ CloudRunConfig, Cluster, ClusterAutoscaling, + ClusterPolicyConfig, ClusterUpdate, ClusterUpgradeInfo, CompleteIPRotationRequest, @@ -60,6 +61,7 @@ ConfidentialNodes, ConfigConnectorConfig, ContainerdConfig, + ControlPlaneEgress, ControlPlaneEndpointsConfig, CostManagementConfig, CreateClusterRequest, @@ -73,6 +75,7 @@ DeleteNodePoolRequest, DesiredAdditionalIPRangesConfig, DesiredEnterpriseConfig, + DisruptionBudget, DisruptionEvent, DnsCacheConfig, DNSConfig, @@ -133,6 +136,7 @@ MaintenanceExclusionOptions, MaintenancePolicy, MaintenanceWindow, + ManagedMachineLearningDiagnosticsConfig, ManagedOpenTelemetryConfig, ManagedPrometheusConfig, MasterAuth, @@ -149,6 +153,7 @@ NetworkTierConfig, NodeConfig, NodeConfigDefaults, + NodeCreationConfig, NodeKubeletConfig, 
NodeLabels, NodeManagement, @@ -160,6 +165,7 @@ NodePoolLoggingConfig, NodePoolUpdateStrategy, NodePoolUpgradeInfo, + NodeReadinessConfig, NodeTaint, NodeTaints, NotificationConfig, @@ -168,6 +174,7 @@ ParallelstoreCsiDriverConfig, PodAutoscaling, PodCIDROverprovisionConfig, + PodSnapshotConfig, PrivateClusterConfig, PrivateClusterMasterGlobalAccessConfig, PrivateIPv6GoogleAccess, @@ -177,6 +184,7 @@ RayClusterMonitoringConfig, RayOperatorConfig, RBACBindingConfig, + RecurringMaintenanceWindow, RecurringTimeWindow, ReleaseChannel, ReservationAffinity, @@ -186,9 +194,11 @@ ResourceUsageExportConfig, RollbackNodePoolUpgradeRequest, SandboxConfig, + ScheduleUpgradeConfig, SecondaryBootDisk, SecondaryBootDiskUpdateStrategy, SecretManagerConfig, + SecretSyncConfig, SecurityBulletinEvent, SecurityPostureConfig, ServerConfig, @@ -208,11 +218,13 @@ ShieldedInstanceConfig, ShieldedNodes, SliceControllerConfig, + SlurmOperatorConfig, SoleTenantConfig, StackType, StartIPRotationRequest, StatefulHAConfig, StatusCondition, + TaintConfig, TimeWindow, TopologyManager, UpdateClusterRequest, @@ -347,6 +359,7 @@ def _get_version(dependency_name): "Cluster", "ClusterAutoscaling", "ClusterManagerClient", + "ClusterPolicyConfig", "ClusterUpdate", "ClusterUpgradeInfo", "CompleteIPRotationRequest", @@ -355,6 +368,7 @@ def _get_version(dependency_name): "ConfidentialNodes", "ConfigConnectorConfig", "ContainerdConfig", + "ControlPlaneEgress", "ControlPlaneEndpointsConfig", "CostManagementConfig", "CreateClusterRequest", @@ -369,6 +383,7 @@ def _get_version(dependency_name): "DeleteNodePoolRequest", "DesiredAdditionalIPRangesConfig", "DesiredEnterpriseConfig", + "DisruptionBudget", "DisruptionEvent", "DnsCacheConfig", "EnterpriseConfig", @@ -428,6 +443,7 @@ def _get_version(dependency_name): "MaintenanceExclusionOptions", "MaintenancePolicy", "MaintenanceWindow", + "ManagedMachineLearningDiagnosticsConfig", "ManagedOpenTelemetryConfig", "ManagedPrometheusConfig", "MasterAuth", @@ -444,6 
+460,7 @@ def _get_version(dependency_name): "NetworkTierConfig", "NodeConfig", "NodeConfigDefaults", + "NodeCreationConfig", "NodeKubeletConfig", "NodeLabels", "NodeManagement", @@ -455,6 +472,7 @@ def _get_version(dependency_name): "NodePoolLoggingConfig", "NodePoolUpdateStrategy", "NodePoolUpgradeInfo", + "NodeReadinessConfig", "NodeTaint", "NodeTaints", "NotificationConfig", @@ -463,6 +481,7 @@ def _get_version(dependency_name): "ParallelstoreCsiDriverConfig", "PodAutoscaling", "PodCIDROverprovisionConfig", + "PodSnapshotConfig", "PrivateClusterConfig", "PrivateClusterMasterGlobalAccessConfig", "PrivateIPv6GoogleAccess", @@ -472,6 +491,7 @@ def _get_version(dependency_name): "RayClusterLoggingConfig", "RayClusterMonitoringConfig", "RayOperatorConfig", + "RecurringMaintenanceWindow", "RecurringTimeWindow", "ReleaseChannel", "ReservationAffinity", @@ -481,9 +501,11 @@ def _get_version(dependency_name): "ResourceUsageExportConfig", "RollbackNodePoolUpgradeRequest", "SandboxConfig", + "ScheduleUpgradeConfig", "SecondaryBootDisk", "SecondaryBootDiskUpdateStrategy", "SecretManagerConfig", + "SecretSyncConfig", "SecurityBulletinEvent", "SecurityPostureConfig", "ServerConfig", @@ -503,11 +525,13 @@ def _get_version(dependency_name): "ShieldedInstanceConfig", "ShieldedNodes", "SliceControllerConfig", + "SlurmOperatorConfig", "SoleTenantConfig", "StackType", "StartIPRotationRequest", "StatefulHAConfig", "StatusCondition", + "TaintConfig", "TimeWindow", "TopologyManager", "UpdateClusterRequest", diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py index 93fb836c1522..bfdc5bd3491f 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py @@ -5106,7 +5106,7 @@ 
async def fetch_node_pool_upgrade_info( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> cluster_service.NodePoolUpgradeInfo: - r"""Fetch upgrade information of a specific nodepool. + r"""Fetch upgrade information of a specific node pool. .. code-block:: python @@ -5138,12 +5138,12 @@ async def sample_fetch_node_pool_upgrade_info(): request (Optional[Union[google.cloud.container_v1.types.FetchNodePoolUpgradeInfoRequest, dict]]): The request object. FetchNodePoolUpgradeInfoRequest fetches the upgrade information of a - nodepool. + node pool. name (:class:`str`): - Required. The name (project, location, cluster, - nodepool) of the nodepool to get. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*`` - or ``projects/*/zones/*/clusters/*/nodePools/*``. + Required. The name (project, location, cluster, node + pool) of the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*`` or + ``projects/*/zones/*/clusters/*/nodePools/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -5159,7 +5159,7 @@ async def sample_fetch_node_pool_upgrade_info(): Returns: google.cloud.container_v1.types.NodePoolUpgradeInfo: NodePoolUpgradeInfo contains the - upgrade information of a nodepool. + upgrade information of a node pool. """ # Create or coerce a protobuf request object. 
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py index 28b4cb9fbae6..66c2a88c4ac9 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py @@ -5511,7 +5511,7 @@ def fetch_node_pool_upgrade_info( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> cluster_service.NodePoolUpgradeInfo: - r"""Fetch upgrade information of a specific nodepool. + r"""Fetch upgrade information of a specific node pool. .. code-block:: python @@ -5543,12 +5543,12 @@ def sample_fetch_node_pool_upgrade_info(): request (Union[google.cloud.container_v1.types.FetchNodePoolUpgradeInfoRequest, dict]): The request object. FetchNodePoolUpgradeInfoRequest fetches the upgrade information of a - nodepool. + node pool. name (str): - Required. The name (project, location, cluster, - nodepool) of the nodepool to get. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*`` - or ``projects/*/zones/*/clusters/*/nodePools/*``. + Required. The name (project, location, cluster, node + pool) of the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*`` or + ``projects/*/zones/*/clusters/*/nodePools/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -5564,7 +5564,7 @@ def sample_fetch_node_pool_upgrade_info(): Returns: google.cloud.container_v1.types.NodePoolUpgradeInfo: NodePoolUpgradeInfo contains the - upgrade information of a nodepool. + upgrade information of a node pool. """ # Create or coerce a protobuf request object. 
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py index 6ff11ec37b84..308fb5944c62 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py @@ -40,7 +40,11 @@ class ClusterManagerTransport(abc.ABC): """Abstract transport class for ClusterManager.""" - AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ) DEFAULT_HOST: str = "container.googleapis.com" diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py index e38bbce38dde..5c509d1c9d13 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py @@ -1327,7 +1327,7 @@ def fetch_node_pool_upgrade_info( ]: r"""Return a callable for the fetch node pool upgrade info method over gRPC. - Fetch upgrade information of a specific nodepool. + Fetch upgrade information of a specific node pool. 
Returns: Callable[[~.FetchNodePoolUpgradeInfoRequest], diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py index b91cb33aceeb..d8827e6e126e 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py @@ -1384,7 +1384,7 @@ def fetch_node_pool_upgrade_info( ]: r"""Return a callable for the fetch node pool upgrade info method over gRPC. - Fetch upgrade information of a specific nodepool. + Fetch upgrade information of a specific node pool. Returns: Callable[[~.FetchNodePoolUpgradeInfoRequest], diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/rest.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/rest.py index a64d9fb66a69..97d856cc90aa 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/rest.py +++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/rest.py @@ -3497,7 +3497,7 @@ def __call__( request (~.cluster_service.FetchNodePoolUpgradeInfoRequest): The request object. FetchNodePoolUpgradeInfoRequest fetches the upgrade information of a - nodepool. + node pool. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3509,7 +3509,7 @@ def __call__( Returns: ~.cluster_service.NodePoolUpgradeInfo: NodePoolUpgradeInfo contains the - upgrade information of a nodepool. + upgrade information of a node pool. 
""" diff --git a/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py b/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py index 11d7f2d9ea39..8e49125b348e 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py +++ b/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py @@ -41,6 +41,7 @@ CloudRunConfig, Cluster, ClusterAutoscaling, + ClusterPolicyConfig, ClusterUpdate, ClusterUpgradeInfo, CompleteIPRotationRequest, @@ -49,6 +50,7 @@ ConfidentialNodes, ConfigConnectorConfig, ContainerdConfig, + ControlPlaneEgress, ControlPlaneEndpointsConfig, CostManagementConfig, CreateClusterRequest, @@ -62,6 +64,7 @@ DeleteNodePoolRequest, DesiredAdditionalIPRangesConfig, DesiredEnterpriseConfig, + DisruptionBudget, DisruptionEvent, DnsCacheConfig, DNSConfig, @@ -122,6 +125,7 @@ MaintenanceExclusionOptions, MaintenancePolicy, MaintenanceWindow, + ManagedMachineLearningDiagnosticsConfig, ManagedOpenTelemetryConfig, ManagedPrometheusConfig, MasterAuth, @@ -138,6 +142,7 @@ NetworkTierConfig, NodeConfig, NodeConfigDefaults, + NodeCreationConfig, NodeKubeletConfig, NodeLabels, NodeManagement, @@ -149,6 +154,7 @@ NodePoolLoggingConfig, NodePoolUpdateStrategy, NodePoolUpgradeInfo, + NodeReadinessConfig, NodeTaint, NodeTaints, NotificationConfig, @@ -157,6 +163,7 @@ ParallelstoreCsiDriverConfig, PodAutoscaling, PodCIDROverprovisionConfig, + PodSnapshotConfig, PrivateClusterConfig, PrivateClusterMasterGlobalAccessConfig, PrivateIPv6GoogleAccess, @@ -166,6 +173,7 @@ RayClusterMonitoringConfig, RayOperatorConfig, RBACBindingConfig, + RecurringMaintenanceWindow, RecurringTimeWindow, ReleaseChannel, ReservationAffinity, @@ -175,9 +183,11 @@ ResourceUsageExportConfig, RollbackNodePoolUpgradeRequest, SandboxConfig, + ScheduleUpgradeConfig, SecondaryBootDisk, SecondaryBootDiskUpdateStrategy, SecretManagerConfig, + SecretSyncConfig, SecurityBulletinEvent, SecurityPostureConfig, ServerConfig, @@ 
-197,11 +207,13 @@ ShieldedInstanceConfig, ShieldedNodes, SliceControllerConfig, + SlurmOperatorConfig, SoleTenantConfig, StackType, StartIPRotationRequest, StatefulHAConfig, StatusCondition, + TaintConfig, TimeWindow, TopologyManager, UpdateClusterRequest, @@ -251,6 +263,7 @@ "CloudRunConfig", "Cluster", "ClusterAutoscaling", + "ClusterPolicyConfig", "ClusterUpdate", "ClusterUpgradeInfo", "CompleteIPRotationRequest", @@ -259,6 +272,7 @@ "ConfidentialNodes", "ConfigConnectorConfig", "ContainerdConfig", + "ControlPlaneEgress", "ControlPlaneEndpointsConfig", "CostManagementConfig", "CreateClusterRequest", @@ -271,6 +285,7 @@ "DeleteNodePoolRequest", "DesiredAdditionalIPRangesConfig", "DesiredEnterpriseConfig", + "DisruptionBudget", "DisruptionEvent", "DnsCacheConfig", "DNSConfig", @@ -329,6 +344,7 @@ "MaintenanceExclusionOptions", "MaintenancePolicy", "MaintenanceWindow", + "ManagedMachineLearningDiagnosticsConfig", "ManagedOpenTelemetryConfig", "ManagedPrometheusConfig", "MasterAuth", @@ -345,6 +361,7 @@ "NetworkTierConfig", "NodeConfig", "NodeConfigDefaults", + "NodeCreationConfig", "NodeKubeletConfig", "NodeLabels", "NodeManagement", @@ -355,6 +372,7 @@ "NodePoolDefaults", "NodePoolLoggingConfig", "NodePoolUpgradeInfo", + "NodeReadinessConfig", "NodeTaint", "NodeTaints", "NotificationConfig", @@ -363,6 +381,7 @@ "ParallelstoreCsiDriverConfig", "PodAutoscaling", "PodCIDROverprovisionConfig", + "PodSnapshotConfig", "PrivateClusterConfig", "PrivateClusterMasterGlobalAccessConfig", "PrivilegedAdmissionConfig", @@ -371,6 +390,7 @@ "RayClusterMonitoringConfig", "RayOperatorConfig", "RBACBindingConfig", + "RecurringMaintenanceWindow", "RecurringTimeWindow", "ReleaseChannel", "ReservationAffinity", @@ -380,9 +400,11 @@ "ResourceUsageExportConfig", "RollbackNodePoolUpgradeRequest", "SandboxConfig", + "ScheduleUpgradeConfig", "SecondaryBootDisk", "SecondaryBootDiskUpdateStrategy", "SecretManagerConfig", + "SecretSyncConfig", "SecurityBulletinEvent", "SecurityPostureConfig", 
"ServerConfig", @@ -402,10 +424,12 @@ "ShieldedInstanceConfig", "ShieldedNodes", "SliceControllerConfig", + "SlurmOperatorConfig", "SoleTenantConfig", "StartIPRotationRequest", "StatefulHAConfig", "StatusCondition", + "TaintConfig", "TimeWindow", "TopologyManager", "UpdateClusterRequest", diff --git a/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py b/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py index 28c63ca5ab3c..0ffa8e58aa62 100644 --- a/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py +++ b/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py @@ -22,6 +22,8 @@ import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore import google.rpc.code_pb2 as code_pb2 # type: ignore import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.date_pb2 as date_pb2 # type: ignore +import google.type.timeofday_pb2 as timeofday_pb2 # type: ignore import proto # type: ignore __protobuf__ = proto.module( @@ -43,6 +45,7 @@ "EvictionGracePeriod", "EvictionMinimumReclaim", "NodeConfig", + "TaintConfig", "AdvancedMachineFeatures", "NodeNetworkConfig", "AdditionalNodeNetworkConfig", @@ -77,6 +80,8 @@ "ParallelstoreCsiDriverConfig", "HighScaleCheckpointingConfig", "LustreCsiDriverConfig", + "SlurmOperatorConfig", + "NodeReadinessConfig", "SliceControllerConfig", "RayOperatorConfig", "GkeBackupAgentConfig", @@ -88,6 +93,8 @@ "PodCIDROverprovisionConfig", "IPAllocationPolicy", "Cluster", + "NodeCreationConfig", + "ControlPlaneEgress", "RBACBindingConfig", "UserManagedKeysConfig", "AnonymousAuthenticationConfig", @@ -136,10 +143,12 @@ "BestEffortProvisioning", "AutoUpgradeOptions", "MaintenancePolicy", + "DisruptionBudget", "MaintenanceWindow", "TimeWindow", "MaintenanceExclusionOptions", "RecurringTimeWindow", + "RecurringMaintenanceWindow", "DailyMaintenanceWindow", "SetNodePoolManagementRequest", "SetNodePoolSizeRequest", @@ 
-202,6 +211,7 @@ "UpgradeAvailableEvent", "SecurityBulletinEvent", "Autopilot", + "ClusterPolicyConfig", "PrivilegedAdmissionConfig", "WorkloadPolicyConfig", "LoggingConfig", @@ -231,9 +241,13 @@ "UpgradeDetails", "FetchNodePoolUpgradeInfoRequest", "NodePoolUpgradeInfo", + "ScheduleUpgradeConfig", "GkeAutoUpgradeConfig", "NetworkTierConfig", + "SecretSyncConfig", "ManagedOpenTelemetryConfig", + "ManagedMachineLearningDiagnosticsConfig", + "PodSnapshotConfig", }, ) @@ -318,7 +332,7 @@ class NodePoolUpdateStrategy(proto.Enum): upgrade parallelism. SHORT_LIVED (5): SHORT_LIVED is the dedicated upgrade strategy for - QueuedProvisioning and flex start nodepools scaled up only + QueuedProvisioning and flex start node pools scaled up only by enqueueing to the Dynamic Workload Scheduler (DWS). """ @@ -447,6 +461,9 @@ class LinuxNodeConfig(proto.Message): See https://docs.kernel.org/admin-guide/mm/transhuge.html for more details. + custom_node_init (google.cloud.container_v1.types.LinuxNodeConfig.CustomNodeInit): + Optional. Allow users to run arbitrary bash + script or container on the node. swap_config (google.cloud.container_v1.types.LinuxNodeConfig.SwapConfig): Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. @@ -458,6 +475,11 @@ class LinuxNodeConfig(proto.Message): will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. + accurate_time_config (google.cloud.container_v1.types.LinuxNodeConfig.AccurateTimeConfig): + Optional. The accurate time configuration for + the node pool. + + This field is a member of `oneof`_ ``_accurate_time_config``. """ class CgroupMode(proto.Enum): @@ -573,6 +595,72 @@ class HugepagesConfig(proto.Message): optional=True, ) + class CustomNodeInit(proto.Message): + r"""Support for running custom init code while bootstrapping + nodes. + + Attributes: + init_script (google.cloud.container_v1.types.LinuxNodeConfig.CustomNodeInit.InitScript): + Optional. 
The init script to be executed on + the node. + """ + + class InitScript(proto.Message): + r"""InitScript provide a simply bash script to be executed on the + node. + + Attributes: + gcs_uri (str): + The Cloud Storage URI for storing the init script. Format: + gs://BUCKET_NAME/OBJECT_NAME The service account on the node + pool must have read access to the object. User can't + configure both gcs_uri and gcp_secret_manager_secret_uri. + gcs_generation (int): + The generation of the init script stored in Gloud Storage. + This is the required field to identify the version of the + init script. User can get the genetaion from + ``gcloud storage objects describe gs://BUCKET_NAME/OBJECT_NAME --format="value(generation)"`` + or from the "Version history" tab of the object in the Cloud + Console UI. + args (MutableSequence[str]): + Optional. The optional arguments line to be + passed to the init script. + gcp_secret_manager_secret_uri (str): + The resource name of the secret manager secret hosting the + init script. Both global and regional secrets are supported + with format below: Global secret: + projects/{project}/secrets/{secret}/versions/{version} + Regional secret: + projects/{project}/locations/{location}/secrets/{secret}/versions/{version} + Example: projects/1234567890/secrets/script_1/versions/1. + Accept version number only, not support version alias. User + can't configure both gcp_secret_manager_secret_uri and + gcs_uri. 
+ """ + + gcs_uri: str = proto.Field( + proto.STRING, + number=1, + ) + gcs_generation: int = proto.Field( + proto.INT64, + number=2, + ) + args: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + gcp_secret_manager_secret_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + init_script: "LinuxNodeConfig.CustomNodeInit.InitScript" = proto.Field( + proto.MESSAGE, + number=1, + message="LinuxNodeConfig.CustomNodeInit.InitScript", + ) + class SwapConfig(proto.Message): r"""Configuration for swap memory on a node pool. @@ -753,7 +841,7 @@ class NodeKernelModuleLoading(proto.Message): class Policy(proto.Enum): r"""Defines the kernel module loading policy for nodes in the - nodepool. + node pool. Values: POLICY_UNSPECIFIED (0): @@ -787,6 +875,27 @@ class Policy(proto.Enum): enum="LinuxNodeConfig.NodeKernelModuleLoading.Policy", ) + class AccurateTimeConfig(proto.Message): + r"""AccurateTimeConfig contains configuration for the accurate + time synchronization feature. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enable_ptp_kvm_time_sync (bool): + Enables enhanced time synchronization using + PTP-KVM. + + This field is a member of `oneof`_ ``_enable_ptp_kvm_time_sync``. 
+ """ + + enable_ptp_kvm_time_sync: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + sysctls: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, @@ -813,6 +922,11 @@ class Policy(proto.Enum): number=5, enum=TransparentHugepageDefrag, ) + custom_node_init: CustomNodeInit = proto.Field( + proto.MESSAGE, + number=11, + message=CustomNodeInit, + ) swap_config: SwapConfig = proto.Field( proto.MESSAGE, number=12, @@ -824,6 +938,12 @@ class Policy(proto.Enum): number=13, message=NodeKernelModuleLoading, ) + accurate_time_config: AccurateTimeConfig = proto.Field( + proto.MESSAGE, + number=14, + optional=True, + message=AccurateTimeConfig, + ) class WindowsNodeConfig(proto.Message): @@ -1053,6 +1173,10 @@ class NodeKubeletConfig(proto.Message): individually instead of as a group. This field is a member of `oneof`_ ``_single_process_oom_kill``. + crash_loop_back_off (google.cloud.container_v1.types.NodeKubeletConfig.CrashLoopBackOffConfig): + Optional. Contains configuration options to + modify node-level parameters for container + restart behavior. shutdown_grace_period_seconds (int): Optional. shutdown_grace_period_seconds is the maximum allowed grace period (in seconds) the total duration that @@ -1077,6 +1201,34 @@ class NodeKubeletConfig(proto.Message): This field is a member of `oneof`_ ``_shutdown_grace_period_critical_pods_seconds``. """ + class CrashLoopBackOffConfig(proto.Message): + r"""Contains config to modify node-level parameters for container + restart behavior. + + Attributes: + max_container_restart_period (str): + Optional. The maximum duration the backoff + delay can accrue to for container restarts, + minimum 1 second, maximum 300 seconds. If not + set, defaults to the internal crashloopbackoff + maximum. + + The string must be a sequence of decimal + numbers, each with optional fraction and a unit + suffix, such as "300ms". + Valid time units are "ns", "us" (or "µs"), "ms", + "s", "m", "h". 
+ + See + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#configurable-container-restart-delay + for more details. + """ + + max_container_restart_period: str = proto.Field( + proto.STRING, + number=1, + ) + cpu_manager_policy: str = proto.Field( proto.STRING, number=1, @@ -1165,6 +1317,11 @@ class NodeKubeletConfig(proto.Message): number=22, optional=True, ) + crash_loop_back_off: CrashLoopBackOffConfig = proto.Field( + proto.MESSAGE, + number=24, + message=CrashLoopBackOffConfig, + ) shutdown_grace_period_seconds: int = proto.Field( proto.INT32, number=26, @@ -1570,18 +1727,25 @@ class NodeConfig(proto.Message): https://cloud.google.com/kubernetes-engine/docs/concepts/node-images for available image types. labels (MutableMapping[str, str]): - The map of Kubernetes labels (key/value - pairs) to be applied to each node. These will - added in addition to any default label(s) that - Kubernetes may apply to the node. - In case of conflict in label keys, the applied - set may differ depending on the Kubernetes - version -- it's best to assume the behavior is - undefined and conflicts should be avoided. - For more information, including usage and the - valid values, see: - - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + The Kubernetes labels (key/value pairs) to apply to each + node. The values in this field are added to the set of + default labels Kubernetes applies to nodes. + + This field has the following restrictions: + + - Labels must use a valid Kubernetes syntax and character + set, as defined in + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set. + - This field supports up to 1,024 total characters in a + single request. + + Depending on the Kubernetes version, keys in this field + might conflict with the keys of the default labels, which + might change which of your labels are applied to the nodes. 
+ Assume that the behavior is unpredictable and avoid label + key conflicts. For more information about the default + labels, see: + https://kubernetes.io/docs/reference/labels-annotations-taints/ local_ssd_count (int): The number of local SSD disks to be attached to the node. @@ -1746,6 +1910,11 @@ class NodeConfig(proto.Message): underutilized nodes. If not set, nodes are scaled down by default behavior, i.e. according to the chosen autoscaling profile. + taint_config (google.cloud.container_v1.types.TaintConfig): + Optional. The taint configuration for the + node pool. + + This field is a member of `oneof`_ ``_taint_config``. """ class LocalSsdEncryptionMode(proto.Enum): @@ -2018,6 +2187,55 @@ class EffectiveCgroupMode(proto.Enum): number=60, message=duration_pb2.Duration, ) + taint_config: "TaintConfig" = proto.Field( + proto.MESSAGE, + number=62, + optional=True, + message="TaintConfig", + ) + + +class TaintConfig(proto.Message): + r"""TaintConfig contains the configuration for the taints of the + node pool. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + architecture_taint_behavior (google.cloud.container_v1.types.TaintConfig.ArchitectureTaintBehavior): + Optional. Controls architecture tainting + behavior. + + This field is a member of `oneof`_ ``_architecture_taint_behavior``. + """ + + class ArchitectureTaintBehavior(proto.Enum): + r"""Controls architecture tainting behavior for a node pool. + New values may be added in the future. + + Values: + ARCHITECTURE_TAINT_BEHAVIOR_UNSPECIFIED (0): + Specifies that the behavior is unspecified, + defaults to ARM. + NONE (1): + Disables default architecture taints on the + node pool. + ARM (2): + Taints all the nodes in the node pool with + the default ARM taint. 
+ """ + + ARCHITECTURE_TAINT_BEHAVIOR_UNSPECIFIED = 0 + NONE = 1 + ARM = 2 + + architecture_taint_behavior: ArchitectureTaintBehavior = proto.Field( + proto.ENUM, + number=2, + optional=True, + enum=ArchitectureTaintBehavior, + ) class AdvancedMachineFeatures(proto.Message): @@ -2148,7 +2366,7 @@ class NodeNetworkConfig(proto.Message): This field is a member of `oneof`_ ``_network_performance_config``. pod_cidr_overprovision_config (google.cloud.container_v1.types.PodCIDROverprovisionConfig): [PRIVATE FIELD] Pod CIDR size overprovisioning config for - the nodepool. + the node pool. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is rounded off to @@ -2174,26 +2392,34 @@ class NodeNetworkConfig(proto.Message): The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes\ *numZones*\ podIPsPerNode. subnetwork (str): - Optional. The subnetwork name/path for the - node pool. Format: + Optional. The subnetwork name/path for the node pool. + Format: projects/{project}/regions/{region}/subnetworks/{subnetwork} - If the cluster is associated with multiple - subnetworks, the subnetwork can be either: - - 1. A user supplied subnetwork name/full path - during node pool creation. Example1: - my-subnet - Example2: - projects/gke-project/regions/us-central1/subnetworks/my-subnet - 2. A subnetwork path picked based on the IP - utilization during node pool creation and - is immutable. + If the cluster is associated with multiple subnetworks, the + subnetwork can be either: + + - A user supplied subnetwork name during node pool creation + (e.g., ``my-subnet``). The name must be between 1 and 63 + characters long, start with a letter, contain only + letters, numbers, and hyphens, and end with a letter or a + number. 
+ - A full subnetwork path during node pool creation, such as + ``projects/gke-project/regions/us-central1/subnetworks/my-subnet`` + - A subnetwork path picked based on the IP utilization + during node pool creation and is immutable. network_tier_config (google.cloud.container_v1.types.NetworkTierConfig): Output only. The network tier configuration for the node pool inherits from the cluster-level configuration and remains immutable throughout the node pool's lifecycle, including during upgrades. + accelerator_network_profile (str): + Immutable. The accelerator network profile + for the node pool. For now the only valid value + is "auto". If specified, the network + configuration of the nodes in this node pool + will be managed by this profile for the + supported machine types, zone, etc. """ class NetworkPerformanceConfig(proto.Message): @@ -2285,6 +2511,10 @@ class Tier(proto.Enum): number=20, message="NetworkTierConfig", ) + accelerator_network_profile: str = proto.Field( + proto.STRING, + number=21, + ) class AdditionalNodeNetworkConfig(proto.Message): @@ -2596,13 +2826,11 @@ class CertificateAuthorityDomainConfig(proto.Message): Attributes: fqdns (MutableSequence[str]): - List of fully qualified domain names (FQDN). - Specifying port is supported. - Wildcards are NOT supported. - Examples: + List of fully qualified domain names (FQDN). Specifying port + is supported. Wildcards are NOT supported. Examples: - - my.customdomain.com - - 10.0.1.2:5000 + - ``my.customdomain.com`` + - ``10.0.1.2:5000`` gcp_secret_manager_certificate_config (google.cloud.container_v1.types.ContainerdConfig.PrivateRegistryAccessConfig.CertificateAuthorityDomainConfig.GCPSecretManagerCertificateConfig): Secret Manager certificate configuration. @@ -2670,18 +2898,15 @@ class RegistryHostConfig(proto.Message): Attributes: server (str): - Defines the host name of the registry server, - which will be used to create configuration file - as /etc/containerd/hosts.d//hosts.toml. 
- It supports fully qualified domain names (FQDN) - and IP addresses: - - Specifying port is supported. - Wildcards are NOT supported. - Examples: - - - my.customdomain.com - - 10.0.1.2:5000 + Defines the host name of the registry server, which will be + used to create configuration file as + /etc/containerd/hosts.d//hosts.toml. It supports fully + qualified domain names (FQDN) and IP addresses: Specifying + port is supported, while scheme and path are NOT supported. + Wildcards are NOT supported. Examples: + + - ``my.customdomain.com`` + - ``10.0.1.2:5000`` hosts (MutableSequence[google.cloud.container_v1.types.ContainerdConfig.RegistryHostConfig.HostConfig]): HostConfig configures a list of host-specific configurations for the server. @@ -2783,16 +3008,14 @@ class HostConfig(proto.Message): Attributes: host (str): - Host configures the registry host/mirror. - It supports fully qualified domain names (FQDN) - and IP addresses: - - Specifying port is supported. - Wildcards are NOT supported. - Examples: - - - my.customdomain.com - - 10.0.1.2:5000 + Host configures the registry host/mirror. It supports fully + qualified domain names (FQDNs) and IP addresses. Specifying + scheme, port or path is supported. Scheme can only be http + or https. Wildcards are NOT supported. Examples: + + - ``my.customdomain.com`` + - ``https://my.customdomain.com/path`` + - ``10.0.1.2:5000`` capabilities (MutableSequence[google.cloud.container_v1.types.ContainerdConfig.RegistryHostConfig.HostCapability]): Capabilities represent the capabilities of the registry host, specifying what operations a @@ -3166,9 +3389,17 @@ class AddonsConfig(proto.Message): Checkpointing add-on. lustre_csi_driver_config (google.cloud.container_v1.types.LustreCsiDriverConfig): Configuration for the Lustre CSI driver. + pod_snapshot_config (google.cloud.container_v1.types.PodSnapshotConfig): + Optional. Configuration for the Pod Snapshot + feature. 
+ slurm_operator_config (google.cloud.container_v1.types.SlurmOperatorConfig): + Configuration for the Slurm Operator. slice_controller_config (google.cloud.container_v1.types.SliceControllerConfig): Optional. Configuration for the slice controller add-on. + node_readiness_config (google.cloud.container_v1.types.NodeReadinessConfig): + Optional. Configuration for + NodeReadinessController add-on. """ http_load_balancing: "HttpLoadBalancing" = proto.Field( @@ -3253,11 +3484,26 @@ class AddonsConfig(proto.Message): number=23, message="LustreCsiDriverConfig", ) + pod_snapshot_config: "PodSnapshotConfig" = proto.Field( + proto.MESSAGE, + number=24, + message="PodSnapshotConfig", + ) + slurm_operator_config: "SlurmOperatorConfig" = proto.Field( + proto.MESSAGE, + number=25, + message="SlurmOperatorConfig", + ) slice_controller_config: "SliceControllerConfig" = proto.Field( proto.MESSAGE, number=26, message="SliceControllerConfig", ) + node_readiness_config: "NodeReadinessConfig" = proto.Field( + proto.MESSAGE, + number=29, + message="NodeReadinessConfig", + ) class HttpLoadBalancing(proto.Message): @@ -3630,6 +3876,14 @@ class LustreCsiDriverConfig(proto.Message): longer required as of GKE node version 1.33.2-gke.4655000, unless you are connecting to a Lustre instance that has the ``gke-support-enabled`` flag. + disable_multi_nic (bool): + When set to true, this disables multi-NIC + support for the Lustre CSI driver. + By default, GKE enables multi-NIC support, which + allows the Lustre CSI driver to automatically + detect and configure all suitable network + interfaces on a node to maximize I/O performance + for demanding workloads. """ enabled: bool = proto.Field( @@ -3640,6 +3894,41 @@ class LustreCsiDriverConfig(proto.Message): proto.BOOL, number=3, ) + disable_multi_nic: bool = proto.Field( + proto.BOOL, + number=4, + ) + + +class SlurmOperatorConfig(proto.Message): + r"""Configuration for the Slurm Operator. 
+ + Attributes: + enabled (bool): + When enabled, it runs a Slurm Operator that + manages the set of compute pods for Slurm + Cluster. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class NodeReadinessConfig(proto.Message): + r"""Configuration for the GKE Node Readiness Controller. + + Attributes: + enabled (bool): + Optional. Whether the GKE Node Readiness + Controller is enabled for this cluster. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) class SliceControllerConfig(proto.Message): @@ -4517,8 +4806,8 @@ class Cluster(proto.Message): fleet (google.cloud.container_v1.types.Fleet): Fleet information for the cluster. security_posture_config (google.cloud.container_v1.types.SecurityPostureConfig): - Enable/Disable Security Posture API features - for the cluster. + Optional. Enable/Disable Security Posture API + features for the cluster. control_plane_endpoints_config (google.cloud.container_v1.types.ControlPlaneEndpointsConfig): Configuration for all cluster's control plane endpoints. @@ -4532,8 +4821,12 @@ class Cluster(proto.Message): secret_manager_config (google.cloud.container_v1.types.SecretManagerConfig): Secret CSI driver configuration. compliance_posture_config (google.cloud.container_v1.types.CompliancePostureConfig): - Enable/Disable Compliance Posture features - for the cluster. + Optional. Deprecated: Compliance Posture is + no longer supported. For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + Enable/Disable Compliance Posture features for + the cluster. satisfies_pzs (bool): Output only. Reserved for future use. @@ -4558,9 +4851,24 @@ class Cluster(proto.Message): anonymous_authentication_config (google.cloud.container_v1.types.AnonymousAuthenticationConfig): Configuration for limiting anonymous access to all endpoints except the health checks. 
+ schedule_upgrade_config (google.cloud.container_v1.types.ScheduleUpgradeConfig): + Optional. Configuration for scheduled + upgrades. + secret_sync_config (google.cloud.container_v1.types.SecretSyncConfig): + Configuration for sync Secret Manager secrets + as k8s secrets. managed_opentelemetry_config (google.cloud.container_v1.types.ManagedOpenTelemetryConfig): Configuration for Managed OpenTelemetry pipeline. + control_plane_egress (google.cloud.container_v1.types.ControlPlaneEgress): + Configuration for control plane egress + control. + managed_machine_learning_diagnostics_config (google.cloud.container_v1.types.ManagedMachineLearningDiagnosticsConfig): + Configuration for Managed Machine Learning + Diagnostics. + node_creation_config (google.cloud.container_v1.types.NodeCreationConfig): + Optional. Configuration for Node Creation + Mode. """ class Status(proto.Enum): @@ -4964,11 +5272,103 @@ class Status(proto.Enum): number=164, message="AnonymousAuthenticationConfig", ) + schedule_upgrade_config: "ScheduleUpgradeConfig" = proto.Field( + proto.MESSAGE, + number=165, + message="ScheduleUpgradeConfig", + ) + secret_sync_config: "SecretSyncConfig" = proto.Field( + proto.MESSAGE, + number=166, + message="SecretSyncConfig", + ) managed_opentelemetry_config: "ManagedOpenTelemetryConfig" = proto.Field( proto.MESSAGE, number=168, message="ManagedOpenTelemetryConfig", ) + control_plane_egress: "ControlPlaneEgress" = proto.Field( + proto.MESSAGE, + number=169, + message="ControlPlaneEgress", + ) + managed_machine_learning_diagnostics_config: "ManagedMachineLearningDiagnosticsConfig" = proto.Field( + proto.MESSAGE, + number=171, + message="ManagedMachineLearningDiagnosticsConfig", + ) + node_creation_config: "NodeCreationConfig" = proto.Field( + proto.MESSAGE, + number=174, + message="NodeCreationConfig", + ) + + +class NodeCreationConfig(proto.Message): + r"""NodeCreationConfig defines the settings of node creation + mode. 
+ + Attributes: + node_creation_mode (google.cloud.container_v1.types.NodeCreationConfig.Mode): + The mode of node creation. + """ + + class Mode(proto.Enum): + r"""The mode of node creation. + + Values: + MODE_UNSPECIFIED (0): + When no user input is provided. + VIA_KUBELET (1): + Kubelet registers itself. + VIA_CONTROL_PLANE (2): + gcp-controller-manager automatically creates + the node object after CSR approval. + """ + + MODE_UNSPECIFIED = 0 + VIA_KUBELET = 1 + VIA_CONTROL_PLANE = 2 + + node_creation_mode: Mode = proto.Field( + proto.ENUM, + number=1, + enum=Mode, + ) + + +class ControlPlaneEgress(proto.Message): + r"""ControlPlaneEgress defines the settings needed to enable + control plane egress control. + + Attributes: + mode (google.cloud.container_v1.types.ControlPlaneEgress.Mode): + Defines the mode of control plane egress. + """ + + class Mode(proto.Enum): + r"""Mode defines the mode of control plane egress. + + Values: + MODE_UNSPECIFIED (0): + Default value not specified. + VIA_CONTROL_PLANE (1): + Control plane has public IP and no + restriction on egress. + NONE (2): + No public IP on control plane and only + internal allowlisted egress. + """ + + MODE_UNSPECIFIED = 0 + VIA_CONTROL_PLANE = 1 + NONE = 2 + + mode: Mode = proto.Field( + proto.ENUM, + number=1, + enum=Mode, + ) class RBACBindingConfig(proto.Message): @@ -5127,7 +5527,11 @@ class Mode(proto.Enum): class CompliancePostureConfig(proto.Message): - r"""CompliancePostureConfig defines the settings needed to + r"""Deprecated: Compliance Posture is no longer supported. + For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + CompliancePostureConfig defines the settings needed to enable/disable features for the Compliance Posture. @@ -5240,6 +5644,11 @@ class Mode(proto.Enum): Applies Security Posture features on the cluster. ENTERPRISE (3): + Deprecated: Security Posture Enterprise + features are no longer supported. 
For more + details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + Applies the Security Posture off cluster Enterprise level features. """ @@ -5260,6 +5669,10 @@ class VulnerabilityMode(proto.Enum): Disables vulnerability scanning on the cluster. VULNERABILITY_BASIC (2): + Deprecated: Basic vulnerability scanning is + no longer supported. For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + Applies basic vulnerability scanning on the cluster. VULNERABILITY_ENTERPRISE (3): @@ -5667,8 +6080,12 @@ class ClusterUpdate(proto.Message): This field is a member of `oneof`_ ``_desired_secret_manager_config``. desired_compliance_posture_config (google.cloud.container_v1.types.CompliancePostureConfig): - Enable/Disable Compliance Posture features - for the cluster. + Deprecated: Compliance Posture is no longer + supported. For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + Enable/Disable Compliance Posture features for + the cluster. This field is a member of `oneof`_ ``_desired_compliance_posture_config``. desired_node_kubelet_config (google.cloud.container_v1.types.NodeKubeletConfig): @@ -5724,12 +6141,27 @@ class ClusterUpdate(proto.Message): desired_network_tier_config (google.cloud.container_v1.types.NetworkTierConfig): The desired network tier configuration for the cluster. + desired_secret_sync_config (google.cloud.container_v1.types.SecretSyncConfig): + Configuration for sync Secret Manager secrets + as k8s secrets. desired_privileged_admission_config (google.cloud.container_v1.types.PrivilegedAdmissionConfig): The desired privileged admission config for the cluster. + desired_control_plane_egress (google.cloud.container_v1.types.ControlPlaneEgress): + The desired control plane egress control + config for the cluster. 
desired_managed_opentelemetry_config (google.cloud.container_v1.types.ManagedOpenTelemetryConfig): The desired managed open telemetry configuration. + desired_autopilot_cluster_policy_config (google.cloud.container_v1.types.ClusterPolicyConfig): + The desired autopilot cluster policies that + to be enforced in the cluster. + desired_managed_machine_learning_diagnostics_config (google.cloud.container_v1.types.ManagedMachineLearningDiagnosticsConfig): + The desired managed machine learning + diagnostics configuration. + desired_node_creation_config (google.cloud.container_v1.types.NodeCreationConfig): + Optional. The desired NodeCreationConfig for + the cluster. """ desired_node_version: str = proto.Field( @@ -6096,16 +6528,41 @@ class ClusterUpdate(proto.Message): number=155, message="NetworkTierConfig", ) + desired_secret_sync_config: "SecretSyncConfig" = proto.Field( + proto.MESSAGE, + number=158, + message="SecretSyncConfig", + ) desired_privileged_admission_config: "PrivilegedAdmissionConfig" = proto.Field( proto.MESSAGE, number=159, message="PrivilegedAdmissionConfig", ) + desired_control_plane_egress: "ControlPlaneEgress" = proto.Field( + proto.MESSAGE, + number=160, + message="ControlPlaneEgress", + ) desired_managed_opentelemetry_config: "ManagedOpenTelemetryConfig" = proto.Field( proto.MESSAGE, number=163, message="ManagedOpenTelemetryConfig", ) + desired_autopilot_cluster_policy_config: "ClusterPolicyConfig" = proto.Field( + proto.MESSAGE, + number=164, + message="ClusterPolicyConfig", + ) + desired_managed_machine_learning_diagnostics_config: "ManagedMachineLearningDiagnosticsConfig" = proto.Field( + proto.MESSAGE, + number=166, + message="ManagedMachineLearningDiagnosticsConfig", + ) + desired_node_creation_config: "NodeCreationConfig" = proto.Field( + proto.MESSAGE, + number=171, + message="NodeCreationConfig", + ) class AdditionalPodRangesConfig(proto.Message): @@ -6155,7 +6612,7 @@ class AdditionalIPRangesConfig(proto.Message): class 
Status(proto.Enum): r"""Additional subnet with DRAINING status will not be selected during new node pool creation. To undrain the draining status, update the - cluster to set the sunbet to ACTIVE status. To remove the additional + cluster to set the subnet to ACTIVE status. To remove the additional subnet, use the update cluster API to remove the subnet from the desired_additional_ip_ranges list. IP ranges can be removed regardless of its status, as long as no node pools are using them. @@ -6989,6 +7446,8 @@ class UpdateNodePoolRequest(proto.Message): underutilized nodes. If not set, nodes are scaled down by default behavior, i.e. according to the chosen autoscaling profile. + taint_config (google.cloud.container_v1.types.TaintConfig): + The taint configuration for the node pool. """ project_id: str = proto.Field( @@ -7163,6 +7622,11 @@ class UpdateNodePoolRequest(proto.Message): number=49, message=duration_pb2.Duration, ) + taint_config: "TaintConfig" = proto.Field( + proto.MESSAGE, + number=51, + message="TaintConfig", + ) class SetNodePoolAutoscalingRequest(proto.Message): @@ -8366,6 +8830,9 @@ class NodePool(proto.Message): node_drain_config (google.cloud.container_v1.types.NodePool.NodeDrainConfig): Specifies the node drain configuration for this node pool. + maintenance_policy (google.cloud.container_v1.types.NodePool.NodePoolMaintenancePolicy): + Optional. Specifies the maintenance policy + for the node pool. """ class Status(proto.Enum): @@ -8651,7 +9118,7 @@ class QueuedProvisioning(proto.Message): Attributes: enabled (bool): - Denotes that this nodepool is QRM specific, + Denotes that this node pool is QRM specific, meaning nodes can be only obtained through queuing via the Cluster Autoscaler ProvisioningRequest API. @@ -8664,12 +9131,22 @@ class QueuedProvisioning(proto.Message): class NodeDrainConfig(proto.Message): r"""NodeDrainConfig contains the node drain related - configurations for this nodepool. + configurations for this node pool. .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + pdb_timeout_duration (google.protobuf.duration_pb2.Duration): + The duration of the PDB timeout period for + node drain. + + This field is a member of `oneof`_ ``_pdb_timeout_duration``. + grace_termination_duration (google.protobuf.duration_pb2.Duration): + The duration of the grace termination period + for node drain. + + This field is a member of `oneof`_ ``_grace_termination_duration``. respect_pdb_during_node_pool_deletion (bool): Whether to respect PDB during node pool deletion. @@ -8677,12 +9154,75 @@ class NodeDrainConfig(proto.Message): This field is a member of `oneof`_ ``_respect_pdb_during_node_pool_deletion``. """ + pdb_timeout_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=duration_pb2.Duration, + ) + grace_termination_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=duration_pb2.Duration, + ) respect_pdb_during_node_pool_deletion: bool = proto.Field( proto.BOOL, number=3, optional=True, ) + class ExclusionUntilEndOfSupport(proto.Message): + r"""Defines the maintenance exclusion for the node pool. + + Attributes: + enabled (bool): + Optional. Indicates whether the exclusion is + enabled. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The start time of the + maintenance exclusion. It is output only. It is + the exclusion creation time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The end time of the maintenance + exclusion. It is output only. It is the cluster + control plane version's end of support time, or + end of extended support time when the cluster is + on extended support channel. 
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + class NodePoolMaintenancePolicy(proto.Message): + r"""Defines the maintenance policy for the node pool. + + Attributes: + exclusion_until_end_of_support (google.cloud.container_v1.types.NodePool.ExclusionUntilEndOfSupport): + Optional. The exclusion until end of support + for the node pool. + """ + + exclusion_until_end_of_support: "NodePool.ExclusionUntilEndOfSupport" = ( + proto.Field( + proto.MESSAGE, + number=1, + message="NodePool.ExclusionUntilEndOfSupport", + ) + ) + name: str = proto.Field( proto.STRING, number=1, @@ -8784,6 +9324,11 @@ class NodeDrainConfig(proto.Message): number=116, message=NodeDrainConfig, ) + maintenance_policy: NodePoolMaintenancePolicy = proto.Field( + proto.MESSAGE, + number=118, + message=NodePoolMaintenancePolicy, + ) class NodeManagement(proto.Message): @@ -8894,6 +9439,9 @@ class MaintenancePolicy(proto.Message): a ``get()`` request to the cluster to get the current resource version and include it with requests to set the policy. + disruption_budget (google.cloud.container_v1.types.DisruptionBudget): + Optional. The upgrade disruption budget for + the cluster control plane. """ window: "MaintenanceWindow" = proto.Field( @@ -8905,6 +9453,52 @@ class MaintenancePolicy(proto.Message): proto.STRING, number=3, ) + disruption_budget: "DisruptionBudget" = proto.Field( + proto.MESSAGE, + number=4, + message="DisruptionBudget", + ) + + +class DisruptionBudget(proto.Message): + r"""DisruptionBudget defines the upgrade disruption budget for + the cluster control plane. + + Attributes: + minor_version_disruption_interval (google.protobuf.duration_pb2.Duration): + Optional. 
The minimum duration between two + minor version upgrades of the control plane. + patch_version_disruption_interval (google.protobuf.duration_pb2.Duration): + Optional. The minimum duration between two + patch version upgrades of the control plane. + last_minor_version_disruption_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The last time a minor version + upgrade was performed on the control plane. + last_disruption_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The last time a disruption was + performed on the control plane. + """ + + minor_version_disruption_interval: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + patch_version_disruption_interval: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + last_minor_version_disruption_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + last_disruption_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) class MaintenanceWindow(proto.Message): @@ -8931,6 +9525,15 @@ class MaintenanceWindow(proto.Message): maintenance windows are set, maintenance can occur at any time. + This field is a member of `oneof`_ ``policy``. + recurring_maintenance_window (google.cloud.container_v1.types.RecurringMaintenanceWindow): + RecurringMaintenanceWindow specifies some + number of recurring time periods for maintenance + to occur. The time windows may be overlapping. + If no maintenance windows are set, maintenance + can occur at any time. Alternative to + RecurringWindow, with renamed fields. + This field is a member of `oneof`_ ``policy``. maintenance_exclusions (MutableMapping[str, google.cloud.container_v1.types.TimeWindow]): Exceptions to maintenance window. 
@@ -8950,6 +9553,12 @@ class MaintenanceWindow(proto.Message): oneof="policy", message="RecurringTimeWindow", ) + recurring_maintenance_window: "RecurringMaintenanceWindow" = proto.Field( + proto.MESSAGE, + number=5, + oneof="policy", + message="RecurringMaintenanceWindow", + ) maintenance_exclusions: MutableMapping[str, "TimeWindow"] = proto.MapField( proto.STRING, proto.MESSAGE, @@ -9114,6 +9723,69 @@ class RecurringTimeWindow(proto.Message): ) +class RecurringMaintenanceWindow(proto.Message): + r"""Represents an arbitrary window of time that recurs. + Will replace RecurringTimeWindow. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + delay_until (google.type.date_pb2.Date): + Optional. Specifies the date before which + will not be scheduled. Depending on the + recurrence, this may be the date the first + window appears. Days are measured in the UTC + timezone. This setting must be used when + INTERVAL>1 or FREQ=WEEKLY/MONTHLY and no BYDAY + specified. + + This field is a member of `oneof`_ ``_delay_until``. + window_start_time (google.type.timeofday_pb2.TimeOfDay): + Required. Start time of the window on days + that it is scheduled, assuming UTC timezone. + window_duration (google.protobuf.duration_pb2.Duration): + Required. Duration of the window. + recurrence (str): + Required. An RRULE + (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for + how this window recurs. + + For example, to have something repeat every weekday, you'd + use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`` + + To repeat some window daily (equivalent to the + DailyMaintenanceWindow): ``FREQ=DAILY`` + + For the first weekend of every month: + ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`` + + The FREQ values of HOURLY, MINUTELY, and SECONDLY are not + supported. 
+ """ + + delay_until: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=date_pb2.Date, + ) + window_start_time: timeofday_pb2.TimeOfDay = proto.Field( + proto.MESSAGE, + number=2, + message=timeofday_pb2.TimeOfDay, + ) + window_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + recurrence: str = proto.Field( + proto.STRING, + number=4, + ) + + class DailyMaintenanceWindow(proto.Message): r"""Time window specified for daily maintenance operations. @@ -9407,10 +10079,13 @@ class AutopilotGeneralProfile(proto.Enum): Use default configuration. NO_PERFORMANCE (1): Avoid extra IP consumption. + NONE (2): + Use default configuration. """ AUTOPILOT_GENERAL_PROFILE_UNSPECIFIED = 0 NO_PERFORMANCE = 1 + NONE = 2 enable_node_autoprovisioning: bool = proto.Field( proto.BOOL, @@ -9630,8 +10305,8 @@ class NodePoolAutoscaling(proto.Message): autoprovisioned (bool): Can this node pool be deleted automatically. location_policy (google.cloud.container_v1.types.NodePoolAutoscaling.LocationPolicy): - Location policy used when scaling up a - nodepool. + Location policy used when scaling up a node + pool. total_min_node_count (int): Minimum number of nodes in the node pool. Must be greater than or equal to 0 and less than or equal to @@ -9647,7 +10322,7 @@ class NodePoolAutoscaling(proto.Message): class LocationPolicy(proto.Enum): r"""Location policy specifies how zones are picked when scaling - up the nodepool. + up the node pool. Values: LOCATION_POLICY_UNSPECIFIED (0): @@ -11095,11 +11770,17 @@ class State(proto.Enum): Secrets in etcd are stored in plain text (at etcd level) - this is unrelated to Compute Engine level full disk encryption. + ALL_OBJECTS_ENCRYPTION_ENABLED (3): + Encryption of all objects in the storage is + enabled. There is no guarantee that all objects + in the storage are encrypted, but eventually + they will be. 
""" UNKNOWN = 0 ENCRYPTED = 1 DECRYPTED = 2 + ALL_OBJECTS_ENCRYPTION_ENABLED = 3 class CurrentState(proto.Enum): r"""Current State of etcd encryption. @@ -11126,6 +11807,17 @@ class CurrentState(proto.Enum): CURRENT_STATE_DECRYPTION_ERROR (6): De-crypting Secrets to plain text in etcd encountered an error. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ENABLED (8): + Encryption of all objects in the storage is + enabled. It does not guarantee that all objects + in the storage are encrypted, but eventually + they will be. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_PENDING (9): + Enablement of the encryption of all objects + in storage is pending. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ERROR (10): + Enabling encryption of all objects in storage + encountered an error. """ CURRENT_STATE_UNSPECIFIED = 0 @@ -11135,6 +11827,9 @@ class CurrentState(proto.Enum): CURRENT_STATE_ENCRYPTION_ERROR = 4 CURRENT_STATE_DECRYPTION_PENDING = 5 CURRENT_STATE_DECRYPTION_ERROR = 6 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ENABLED = 8 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_PENDING = 9 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ERROR = 10 class OperationError(proto.Message): r"""OperationError records errors seen from CloudKMS keys @@ -11788,6 +12483,9 @@ class State(proto.Enum): Values: STATE_UNSPECIFIED (0): STATE_UNSPECIFIED indicates the state is unspecified. + SCHEDULED (1): + SCHEDULED indicates the upgrade was + scheduled. STARTED (3): STARTED indicates the upgrade has started. SUCCEEDED (4): @@ -11800,6 +12498,7 @@ class State(proto.Enum): """ STATE_UNSPECIFIED = 0 + SCHEDULED = 1 STARTED = 3 SUCCEEDED = 4 FAILED = 5 @@ -12131,6 +12830,9 @@ class Autopilot(proto.Message): PrivilegedAdmissionConfig is the configuration related to privileged admission control. + cluster_policy_config (google.cloud.container_v1.types.ClusterPolicyConfig): + ClusterPolicyConfig denotes cluster level + policies that are enforced for the cluster. 
""" enabled: bool = proto.Field( @@ -12147,6 +12849,63 @@ class Autopilot(proto.Message): number=4, message="PrivilegedAdmissionConfig", ) + cluster_policy_config: "ClusterPolicyConfig" = proto.Field( + proto.MESSAGE, + number=5, + message="ClusterPolicyConfig", + ) + + +class ClusterPolicyConfig(proto.Message): + r"""ClusterPolicyConfig stores the configuration for cluster wide + policies. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + no_system_mutation (bool): + Denotes that preventing creation and mutation + of resources in GKE managed namespaces and + cluster-scoped GKE managed resources . + + This field is a member of `oneof`_ ``_no_system_mutation``. + no_system_impersonation (bool): + Denotes preventing impersonation and CSRs for + GKE System users. + + This field is a member of `oneof`_ ``_no_system_impersonation``. + no_unsafe_webhooks (bool): + Denotes preventing unsafe webhooks. + + This field is a member of `oneof`_ ``_no_unsafe_webhooks``. + no_standard_node_pools (bool): + Denotes preventing standard node pools and + requiring only autopilot node pools. + + This field is a member of `oneof`_ ``_no_standard_node_pools``. + """ + + no_system_mutation: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + no_system_impersonation: bool = proto.Field( + proto.BOOL, + number=2, + optional=True, + ) + no_unsafe_webhooks: bool = proto.Field( + proto.BOOL, + number=3, + optional=True, + ) + no_standard_node_pools: bool = proto.Field( + proto.BOOL, + number=5, + optional=True, + ) class PrivilegedAdmissionConfig(proto.Message): @@ -12392,7 +13151,7 @@ class RayClusterMonitoringConfig(proto.Message): class NodePoolLoggingConfig(proto.Message): r"""NodePoolLoggingConfig specifies logging configuration for - nodepools. + node pools. 
Attributes: variant_config (google.cloud.container_v1.types.LoggingVariantConfig): @@ -13022,7 +13781,8 @@ class RotationConfig(proto.Message): class BootDisk(proto.Message): - r"""BootDisk specifies the boot disk configuration for nodepools. + r"""BootDisk specifies the boot disk configuration for node + pools. Attributes: disk_type (str): @@ -13356,12 +14116,12 @@ class StartType(proto.Enum): class FetchNodePoolUpgradeInfoRequest(proto.Message): r"""FetchNodePoolUpgradeInfoRequest fetches the upgrade - information of a nodepool. + information of a node pool. Attributes: name (str): - Required. The name (project, location, cluster, nodepool) of - the nodepool to get. Specified in the format + Required. The name (project, location, cluster, node pool) + of the node pool to get. Specified in the format ``projects/*/locations/*/clusters/*/nodePools/*`` or ``projects/*/zones/*/clusters/*/nodePools/*``. version (str): @@ -13381,7 +14141,7 @@ class FetchNodePoolUpgradeInfoRequest(proto.Message): class NodePoolUpgradeInfo(proto.Message): r"""NodePoolUpgradeInfo contains the upgrade information of a - nodepool. + node pool. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -13404,13 +14164,13 @@ class NodePoolUpgradeInfo(proto.Message): upgrade_details (MutableSequence[google.cloud.container_v1.types.UpgradeDetails]): The list of past auto upgrades. end_of_standard_support_timestamp (str): - The nodepool's current minor version's end of - standard support timestamp. + The node pool's current minor version's end + of standard support timestamp. This field is a member of `oneof`_ ``_end_of_standard_support_timestamp``. end_of_extended_support_timestamp (str): - The nodepool's current minor version's end of - extended support timestamp. + The node pool's current minor version's end + of extended support timestamp. This field is a member of `oneof`_ ``_end_of_extended_support_timestamp``. 
""" @@ -13501,6 +14261,21 @@ class AutoUpgradePausedReason(proto.Enum): ) +class ScheduleUpgradeConfig(proto.Message): + r"""Configuration for scheduled upgrades on the cluster. + + Attributes: + enabled (bool): + Optional. Whether or not scheduled upgrades + are enabled. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + class GkeAutoUpgradeConfig(proto.Message): r"""GkeAutoUpgradeConfig is the configuration for GKE auto upgrades. @@ -13579,6 +14354,67 @@ class NetworkTier(proto.Enum): ) +class SecretSyncConfig(proto.Message): + r"""Configuration for sync Secret Manager secrets as k8s secrets. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enabled (bool): + Enable/Disable Secret Sync Config. + + This field is a member of `oneof`_ ``_enabled``. + rotation_config (google.cloud.container_v1.types.SecretSyncConfig.SyncRotationConfig): + Rotation config for secret manager. + + This field is a member of `oneof`_ ``_rotation_config``. + """ + + class SyncRotationConfig(proto.Message): + r"""SyncRotationConfig is config for secret manager auto + rotation. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enabled (bool): + Whether the rotation is enabled. + + This field is a member of `oneof`_ ``_enabled``. + rotation_interval (google.protobuf.duration_pb2.Duration): + The interval between two consecutive + rotations. Default rotation interval is 2 + minutes. + + This field is a member of `oneof`_ ``_rotation_interval``. 
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + rotation_interval: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=duration_pb2.Duration, + ) + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + rotation_config: SyncRotationConfig = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=SyncRotationConfig, + ) + + class ManagedOpenTelemetryConfig(proto.Message): r"""ManagedOpenTelemetryConfig is the configuration for the GKE Managed OpenTelemetry pipeline. @@ -13620,4 +14456,42 @@ class Scope(proto.Enum): ) +class ManagedMachineLearningDiagnosticsConfig(proto.Message): + r"""ManagedMachineLearningDiagnosticsConfig is the configuration + for the GKE Managed Machine Learning Diagnostics pipeline. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enabled (bool): + Enable/Disable Managed Machine Learning + Diagnostics. + + This field is a member of `oneof`_ ``_enabled``. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + + +class PodSnapshotConfig(proto.Message): + r"""PodSnapshotConfig is the configuration for GKE Pod Snapshots + feature. + + Attributes: + enabled (bool): + Whether or not the Pod Snapshots feature is + enabled. 
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/__init__.py b/packages/google-cloud-container/google/cloud/container_v1beta1/__init__.py index 7379eb9ce2c7..0b3e86714ef4 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/__init__.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/__init__.py @@ -33,6 +33,7 @@ AddonsConfig, AdvancedDatapathObservabilityConfig, AdvancedMachineFeatures, + AgentSandboxConfig, AnonymousAuthenticationConfig, AuthenticatorGroupsConfig, AutoIpamConfig, @@ -53,6 +54,7 @@ CloudRunConfig, Cluster, ClusterAutoscaling, + ClusterPolicyConfig, ClusterTelemetry, ClusterUpdate, ClusterUpgradeInfo, @@ -64,6 +66,7 @@ ConfidentialNodes, ConfigConnectorConfig, ContainerdConfig, + ControlPlaneEgress, ControlPlaneEndpointsConfig, CostManagementConfig, CreateClusterRequest, @@ -77,6 +80,7 @@ DeleteNodePoolRequest, DesiredAdditionalIPRangesConfig, DesiredEnterpriseConfig, + DisruptionBudget, DisruptionEvent, DnsCacheConfig, DNSConfig, @@ -143,6 +147,7 @@ MaintenanceExclusionOptions, MaintenancePolicy, MaintenanceWindow, + ManagedMachineLearningDiagnosticsConfig, ManagedOpenTelemetryConfig, ManagedPrometheusConfig, Master, @@ -160,6 +165,7 @@ NetworkTierConfig, NodeConfig, NodeConfigDefaults, + NodeCreationConfig, NodeKubeletConfig, NodeLabels, NodeManagement, @@ -170,7 +176,9 @@ NodePoolDefaults, NodePoolLoggingConfig, NodePoolUpdateStrategy, + NodePoolUpgradeConcurrencyConfig, NodePoolUpgradeInfo, + NodeReadinessConfig, NodeTaint, NodeTaints, NotificationConfig, @@ -191,6 +199,7 @@ RayClusterMonitoringConfig, RayOperatorConfig, RBACBindingConfig, + RecurringMaintenanceWindow, RecurringTimeWindow, ReleaseChannel, ReservationAffinity, @@ -202,6 +211,7 @@ RollbackSafeUpgrade, RollbackSafeUpgradeStatus, SandboxConfig, + ScheduleUpgradeConfig, SecondaryBootDisk, 
SecondaryBootDiskUpdateStrategy, SecretManagerConfig, @@ -225,11 +235,13 @@ ShieldedInstanceConfig, ShieldedNodes, SliceControllerConfig, + SlurmOperatorConfig, SoleTenantConfig, StackType, StartIPRotationRequest, StatefulHAConfig, StatusCondition, + TaintConfig, TimeWindow, TopologyManager, TpuConfig, @@ -349,6 +361,7 @@ def _get_version(dependency_name): "AddonsConfig", "AdvancedDatapathObservabilityConfig", "AdvancedMachineFeatures", + "AgentSandboxConfig", "AnonymousAuthenticationConfig", "AuthenticatorGroupsConfig", "AutoIpamConfig", @@ -370,6 +383,7 @@ def _get_version(dependency_name): "Cluster", "ClusterAutoscaling", "ClusterManagerClient", + "ClusterPolicyConfig", "ClusterTelemetry", "ClusterUpdate", "ClusterUpgradeInfo", @@ -381,6 +395,7 @@ def _get_version(dependency_name): "ConfidentialNodes", "ConfigConnectorConfig", "ContainerdConfig", + "ControlPlaneEgress", "ControlPlaneEndpointsConfig", "CostManagementConfig", "CreateClusterRequest", @@ -395,6 +410,7 @@ def _get_version(dependency_name): "DeleteNodePoolRequest", "DesiredAdditionalIPRangesConfig", "DesiredEnterpriseConfig", + "DisruptionBudget", "DisruptionEvent", "DnsCacheConfig", "EnterpriseConfig", @@ -460,6 +476,7 @@ def _get_version(dependency_name): "MaintenanceExclusionOptions", "MaintenancePolicy", "MaintenanceWindow", + "ManagedMachineLearningDiagnosticsConfig", "ManagedOpenTelemetryConfig", "ManagedPrometheusConfig", "Master", @@ -477,6 +494,7 @@ def _get_version(dependency_name): "NetworkTierConfig", "NodeConfig", "NodeConfigDefaults", + "NodeCreationConfig", "NodeKubeletConfig", "NodeLabels", "NodeManagement", @@ -487,7 +505,9 @@ def _get_version(dependency_name): "NodePoolDefaults", "NodePoolLoggingConfig", "NodePoolUpdateStrategy", + "NodePoolUpgradeConcurrencyConfig", "NodePoolUpgradeInfo", + "NodeReadinessConfig", "NodeTaint", "NodeTaints", "NotificationConfig", @@ -508,6 +528,7 @@ def _get_version(dependency_name): "RayClusterLoggingConfig", "RayClusterMonitoringConfig", 
"RayOperatorConfig", + "RecurringMaintenanceWindow", "RecurringTimeWindow", "ReleaseChannel", "ReservationAffinity", @@ -519,6 +540,7 @@ def _get_version(dependency_name): "RollbackSafeUpgrade", "RollbackSafeUpgradeStatus", "SandboxConfig", + "ScheduleUpgradeConfig", "SecondaryBootDisk", "SecondaryBootDiskUpdateStrategy", "SecretManagerConfig", @@ -542,11 +564,13 @@ def _get_version(dependency_name): "ShieldedInstanceConfig", "ShieldedNodes", "SliceControllerConfig", + "SlurmOperatorConfig", "SoleTenantConfig", "StackType", "StartIPRotationRequest", "StatefulHAConfig", "StatusCondition", + "TaintConfig", "TimeWindow", "TopologyManager", "TpuConfig", diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py index 3b86fef7609d..0cc3bef83ae6 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py @@ -5138,7 +5138,7 @@ async def fetch_node_pool_upgrade_info( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> cluster_service.NodePoolUpgradeInfo: - r"""Fetch upgrade information of a specific nodepool. + r"""Fetch upgrade information of a specific node pool. .. code-block:: python @@ -5170,12 +5170,12 @@ async def sample_fetch_node_pool_upgrade_info(): request (Optional[Union[google.cloud.container_v1beta1.types.FetchNodePoolUpgradeInfoRequest, dict]]): The request object. FetchNodePoolUpgradeInfoRequest fetches the upgrade information of a - nodepool. + node pool. name (:class:`str`): - Required. The name (project, location, cluster, - nodepool) of the nodepool to get. 
Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*`` - or ``projects/*/zones/*/clusters/*/nodePools/*``. + Required. The name (project, location, cluster, node + pool) of the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*`` or + ``projects/*/zones/*/clusters/*/nodePools/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -5191,7 +5191,7 @@ async def sample_fetch_node_pool_upgrade_info(): Returns: google.cloud.container_v1beta1.types.NodePoolUpgradeInfo: NodePoolUpgradeInfo contains the - upgrade information of a nodepool. + upgrade information of a node pool. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py index ffc06785f902..78c0bf781eb3 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py @@ -5534,7 +5534,7 @@ def fetch_node_pool_upgrade_info( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> cluster_service.NodePoolUpgradeInfo: - r"""Fetch upgrade information of a specific nodepool. + r"""Fetch upgrade information of a specific node pool. .. code-block:: python @@ -5566,12 +5566,12 @@ def sample_fetch_node_pool_upgrade_info(): request (Union[google.cloud.container_v1beta1.types.FetchNodePoolUpgradeInfoRequest, dict]): The request object. FetchNodePoolUpgradeInfoRequest fetches the upgrade information of a - nodepool. + node pool. name (str): - Required. The name (project, location, cluster, - nodepool) of the nodepool to get. 
Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*`` - or ``projects/*/zones/*/clusters/*/nodePools/*``. + Required. The name (project, location, cluster, node + pool) of the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*`` or + ``projects/*/zones/*/clusters/*/nodePools/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -5587,7 +5587,7 @@ def sample_fetch_node_pool_upgrade_info(): Returns: google.cloud.container_v1beta1.types.NodePoolUpgradeInfo: NodePoolUpgradeInfo contains the - upgrade information of a nodepool. + upgrade information of a node pool. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py index bb38fcefd15d..c4c20075c4bc 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py @@ -40,7 +40,11 @@ class ClusterManagerTransport(abc.ABC): """Abstract transport class for ClusterManager.""" - AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ) DEFAULT_HOST: str = "container.googleapis.com" diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py index 1b44a7ee2f0e..c5a768552aa3 100644 --- 
a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py @@ -1356,7 +1356,7 @@ def fetch_node_pool_upgrade_info( ]: r"""Return a callable for the fetch node pool upgrade info method over gRPC. - Fetch upgrade information of a specific nodepool. + Fetch upgrade information of a specific node pool. Returns: Callable[[~.FetchNodePoolUpgradeInfoRequest], diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py index 2620600133fb..16ff54cfd9c1 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py @@ -1414,7 +1414,7 @@ def fetch_node_pool_upgrade_info( ]: r"""Return a callable for the fetch node pool upgrade info method over gRPC. - Fetch upgrade information of a specific nodepool. + Fetch upgrade information of a specific node pool. 
Returns: Callable[[~.FetchNodePoolUpgradeInfoRequest], diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/types/__init__.py b/packages/google-cloud-container/google/cloud/container_v1beta1/types/__init__.py index 7020e20fe912..91350c4b87d2 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/types/__init__.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/types/__init__.py @@ -22,6 +22,7 @@ AddonsConfig, AdvancedDatapathObservabilityConfig, AdvancedMachineFeatures, + AgentSandboxConfig, AnonymousAuthenticationConfig, AuthenticatorGroupsConfig, AutoIpamConfig, @@ -42,6 +43,7 @@ CloudRunConfig, Cluster, ClusterAutoscaling, + ClusterPolicyConfig, ClusterTelemetry, ClusterUpdate, ClusterUpgradeInfo, @@ -53,6 +55,7 @@ ConfidentialNodes, ConfigConnectorConfig, ContainerdConfig, + ControlPlaneEgress, ControlPlaneEndpointsConfig, CostManagementConfig, CreateClusterRequest, @@ -66,6 +69,7 @@ DeleteNodePoolRequest, DesiredAdditionalIPRangesConfig, DesiredEnterpriseConfig, + DisruptionBudget, DisruptionEvent, DnsCacheConfig, DNSConfig, @@ -132,6 +136,7 @@ MaintenanceExclusionOptions, MaintenancePolicy, MaintenanceWindow, + ManagedMachineLearningDiagnosticsConfig, ManagedOpenTelemetryConfig, ManagedPrometheusConfig, Master, @@ -149,6 +154,7 @@ NetworkTierConfig, NodeConfig, NodeConfigDefaults, + NodeCreationConfig, NodeKubeletConfig, NodeLabels, NodeManagement, @@ -159,7 +165,9 @@ NodePoolDefaults, NodePoolLoggingConfig, NodePoolUpdateStrategy, + NodePoolUpgradeConcurrencyConfig, NodePoolUpgradeInfo, + NodeReadinessConfig, NodeTaint, NodeTaints, NotificationConfig, @@ -180,6 +188,7 @@ RayClusterMonitoringConfig, RayOperatorConfig, RBACBindingConfig, + RecurringMaintenanceWindow, RecurringTimeWindow, ReleaseChannel, ReservationAffinity, @@ -191,6 +200,7 @@ RollbackSafeUpgrade, RollbackSafeUpgradeStatus, SandboxConfig, + ScheduleUpgradeConfig, SecondaryBootDisk, SecondaryBootDiskUpdateStrategy, 
SecretManagerConfig, @@ -214,11 +224,13 @@ ShieldedInstanceConfig, ShieldedNodes, SliceControllerConfig, + SlurmOperatorConfig, SoleTenantConfig, StackType, StartIPRotationRequest, StatefulHAConfig, StatusCondition, + TaintConfig, TimeWindow, TopologyManager, TpuConfig, @@ -254,6 +266,7 @@ "AddonsConfig", "AdvancedDatapathObservabilityConfig", "AdvancedMachineFeatures", + "AgentSandboxConfig", "AnonymousAuthenticationConfig", "AuthenticatorGroupsConfig", "AutoIpamConfig", @@ -274,6 +287,7 @@ "CloudRunConfig", "Cluster", "ClusterAutoscaling", + "ClusterPolicyConfig", "ClusterTelemetry", "ClusterUpdate", "ClusterUpgradeInfo", @@ -285,6 +299,7 @@ "ConfidentialNodes", "ConfigConnectorConfig", "ContainerdConfig", + "ControlPlaneEgress", "ControlPlaneEndpointsConfig", "CostManagementConfig", "CreateClusterRequest", @@ -297,6 +312,7 @@ "DeleteNodePoolRequest", "DesiredAdditionalIPRangesConfig", "DesiredEnterpriseConfig", + "DisruptionBudget", "DisruptionEvent", "DnsCacheConfig", "DNSConfig", @@ -362,6 +378,7 @@ "MaintenanceExclusionOptions", "MaintenancePolicy", "MaintenanceWindow", + "ManagedMachineLearningDiagnosticsConfig", "ManagedOpenTelemetryConfig", "ManagedPrometheusConfig", "Master", @@ -379,6 +396,7 @@ "NetworkTierConfig", "NodeConfig", "NodeConfigDefaults", + "NodeCreationConfig", "NodeKubeletConfig", "NodeLabels", "NodeManagement", @@ -388,7 +406,9 @@ "NodePoolAutoscaling", "NodePoolDefaults", "NodePoolLoggingConfig", + "NodePoolUpgradeConcurrencyConfig", "NodePoolUpgradeInfo", + "NodeReadinessConfig", "NodeTaint", "NodeTaints", "NotificationConfig", @@ -408,6 +428,7 @@ "RayClusterMonitoringConfig", "RayOperatorConfig", "RBACBindingConfig", + "RecurringMaintenanceWindow", "RecurringTimeWindow", "ReleaseChannel", "ReservationAffinity", @@ -419,6 +440,7 @@ "RollbackSafeUpgrade", "RollbackSafeUpgradeStatus", "SandboxConfig", + "ScheduleUpgradeConfig", "SecondaryBootDisk", "SecondaryBootDiskUpdateStrategy", "SecretManagerConfig", @@ -442,10 +464,12 @@ 
"ShieldedInstanceConfig", "ShieldedNodes", "SliceControllerConfig", + "SlurmOperatorConfig", "SoleTenantConfig", "StartIPRotationRequest", "StatefulHAConfig", "StatusCondition", + "TaintConfig", "TimeWindow", "TopologyManager", "TpuConfig", diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py b/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py index d8a472632460..e2c17ecc32d1 100644 --- a/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py +++ b/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py @@ -23,6 +23,7 @@ import google.rpc.code_pb2 as code_pb2 # type: ignore import google.rpc.status_pb2 as status_pb2 # type: ignore import google.type.date_pb2 as date_pb2 # type: ignore +import google.type.timeofday_pb2 as timeofday_pb2 # type: ignore import proto # type: ignore __protobuf__ = proto.module( @@ -44,6 +45,7 @@ "EvictionGracePeriod", "EvictionMinimumReclaim", "NodeConfig", + "TaintConfig", "AdvancedMachineFeatures", "NodeNetworkConfig", "AdditionalNodeNetworkConfig", @@ -81,6 +83,9 @@ "ParallelstoreCsiDriverConfig", "HighScaleCheckpointingConfig", "LustreCsiDriverConfig", + "SlurmOperatorConfig", + "AgentSandboxConfig", + "NodeReadinessConfig", "SliceControllerConfig", "RayOperatorConfig", "PrivateClusterMasterGlobalAccessConfig", @@ -97,6 +102,8 @@ "AuthenticatorGroupsConfig", "ClusterTelemetry", "Cluster", + "NodeCreationConfig", + "ControlPlaneEgress", "RBACBindingConfig", "UserManagedKeysConfig", "AnonymousAuthenticationConfig", @@ -148,10 +155,12 @@ "NodeManagement", "AutoUpgradeOptions", "MaintenancePolicy", + "DisruptionBudget", "MaintenanceWindow", "TimeWindow", "MaintenanceExclusionOptions", "RecurringTimeWindow", + "RecurringMaintenanceWindow", "DailyMaintenanceWindow", "SetNodePoolManagementRequest", "SetNodePoolSizeRequest", @@ -218,8 +227,10 @@ "RollbackSafeUpgrade", 
"AutopilotConversionStatus", "Autopilot", + "ClusterPolicyConfig", "PrivilegedAdmissionConfig", "WorkloadPolicyConfig", + "NodePoolUpgradeConcurrencyConfig", "NotificationConfig", "ConfidentialNodes", "UpgradeEvent", @@ -252,10 +263,12 @@ "UpgradeDetails", "FetchNodePoolUpgradeInfoRequest", "NodePoolUpgradeInfo", + "ScheduleUpgradeConfig", "GkeAutoUpgradeConfig", "NetworkTierConfig", "SecretSyncConfig", "ManagedOpenTelemetryConfig", + "ManagedMachineLearningDiagnosticsConfig", "PodSnapshotConfig", }, ) @@ -318,7 +331,7 @@ class NodePoolUpdateStrategy(proto.Enum): upgrade parallelism. SHORT_LIVED (5): SHORT_LIVED is the dedicated upgrade strategy for - QueuedProvisioning and flex start nodepools scaled up only + QueuedProvisioning and flex start node pools scaled up only by enqueueing to the Dynamic Workload Scheduler (DWS). """ @@ -475,6 +488,9 @@ class LinuxNodeConfig(proto.Message): See https://docs.kernel.org/admin-guide/mm/transhuge.html for more details. + custom_node_init (google.cloud.container_v1beta1.types.LinuxNodeConfig.CustomNodeInit): + Optional. Allow users to run arbitrary bash + script or container on the node. swap_config (google.cloud.container_v1beta1.types.LinuxNodeConfig.SwapConfig): Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. @@ -486,6 +502,11 @@ class LinuxNodeConfig(proto.Message): will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. + accurate_time_config (google.cloud.container_v1beta1.types.LinuxNodeConfig.AccurateTimeConfig): + Optional. The accurate time configuration for + the node pool. + + This field is a member of `oneof`_ ``_accurate_time_config``. """ class CgroupMode(proto.Enum): @@ -601,6 +622,72 @@ class HugepagesConfig(proto.Message): optional=True, ) + class CustomNodeInit(proto.Message): + r"""Support for running custom init code while bootstrapping + nodes. 
+ + Attributes: + init_script (google.cloud.container_v1beta1.types.LinuxNodeConfig.CustomNodeInit.InitScript): + Optional. The init script to be executed on + the node. + """ + + class InitScript(proto.Message): + r"""InitScript provide a simply bash script to be executed on the + node. + + Attributes: + gcs_uri (str): + The Cloud Storage URI for storing the init script. Format: + gs://BUCKET_NAME/OBJECT_NAME The service account on the node + pool must have read access to the object. User can't + configure both gcs_uri and gcp_secret_manager_secret_uri. + gcs_generation (int): + The generation of the init script stored in Gloud Storage. + This is the required field to identify the version of the + init script. User can get the genetaion from + ``gcloud storage objects describe gs://BUCKET_NAME/OBJECT_NAME --format="value(generation)"`` + or from the "Version history" tab of the object in the Cloud + Console UI. + args (MutableSequence[str]): + Optional. The optional arguments line to be + passed to the init script. + gcp_secret_manager_secret_uri (str): + The resource name of the secret manager secret hosting the + init script. Both global and regional secrets are supported + with format below: Global secret: + projects/{project}/secrets/{secret}/versions/{version} + Regional secret: + projects/{project}/locations/{location}/secrets/{secret}/versions/{version} + Example: projects/1234567890/secrets/script_1/versions/1. + Accept version number only, not support version alias. User + can't configure both gcp_secret_manager_secret_uri and + gcs_uri. 
+ """ + + gcs_uri: str = proto.Field( + proto.STRING, + number=1, + ) + gcs_generation: int = proto.Field( + proto.INT64, + number=2, + ) + args: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + gcp_secret_manager_secret_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + init_script: "LinuxNodeConfig.CustomNodeInit.InitScript" = proto.Field( + proto.MESSAGE, + number=1, + message="LinuxNodeConfig.CustomNodeInit.InitScript", + ) + class SwapConfig(proto.Message): r"""Configuration for swap memory on a node pool. @@ -781,7 +868,7 @@ class NodeKernelModuleLoading(proto.Message): class Policy(proto.Enum): r"""Defines the kernel module loading policy for nodes in the - nodepool. + node pool. Values: POLICY_UNSPECIFIED (0): @@ -815,6 +902,27 @@ class Policy(proto.Enum): enum="LinuxNodeConfig.NodeKernelModuleLoading.Policy", ) + class AccurateTimeConfig(proto.Message): + r"""AccurateTimeConfig contains configuration for the accurate + time synchronization feature. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enable_ptp_kvm_time_sync (bool): + Enables enhanced time synchronization using + PTP-KVM. + + This field is a member of `oneof`_ ``_enable_ptp_kvm_time_sync``. 
+ """ + + enable_ptp_kvm_time_sync: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + sysctls: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, @@ -841,6 +949,11 @@ class Policy(proto.Enum): number=5, enum=TransparentHugepageDefrag, ) + custom_node_init: CustomNodeInit = proto.Field( + proto.MESSAGE, + number=11, + message=CustomNodeInit, + ) swap_config: SwapConfig = proto.Field( proto.MESSAGE, number=12, @@ -852,6 +965,12 @@ class Policy(proto.Enum): number=13, message=NodeKernelModuleLoading, ) + accurate_time_config: AccurateTimeConfig = proto.Field( + proto.MESSAGE, + number=14, + optional=True, + message=AccurateTimeConfig, + ) class WindowsNodeConfig(proto.Message): @@ -1081,6 +1200,10 @@ class NodeKubeletConfig(proto.Message): individually instead of as a group. This field is a member of `oneof`_ ``_single_process_oom_kill``. + crash_loop_back_off (google.cloud.container_v1beta1.types.NodeKubeletConfig.CrashLoopBackOffConfig): + Optional. Contains configuration options to + modify node-level parameters for container + restart behavior. shutdown_grace_period_seconds (int): Optional. shutdown_grace_period_seconds is the maximum allowed grace period (in seconds) the total duration that @@ -1094,7 +1217,7 @@ class NodeKubeletConfig(proto.Message): This field is a member of `oneof`_ ``_shutdown_grace_period_seconds``. shutdown_grace_period_critical_pods_seconds (int): - Optional. shutdown_grace_period_critical_pod_seconds is the + Optional. shutdown_grace_period_critical_pods_seconds is the maximum allowed grace period (in seconds) used to terminate critical pods during a node shutdown. This value should be <= shutdown_grace_period_seconds, and is only valid if @@ -1105,6 +1228,34 @@ class NodeKubeletConfig(proto.Message): This field is a member of `oneof`_ ``_shutdown_grace_period_critical_pods_seconds``. 
""" + class CrashLoopBackOffConfig(proto.Message): + r"""Contains config to modify node-level parameters for container + restart behavior. + + Attributes: + max_container_restart_period (str): + Optional. The maximum duration the backoff + delay can accrue to for container restarts, + minimum 1 second, maximum 300 seconds. If not + set, defaults to the internal crashloopbackoff + maximum. + + The string must be a sequence of decimal + numbers, each with optional fraction and a unit + suffix, such as "300ms". + Valid time units are "ns", "us" (or "µs"), "ms", + "s", "m", "h". + + See + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#configurable-container-restart-delay + for more details. + """ + + max_container_restart_period: str = proto.Field( + proto.STRING, + number=1, + ) + cpu_manager_policy: str = proto.Field( proto.STRING, number=1, @@ -1193,6 +1344,11 @@ class NodeKubeletConfig(proto.Message): number=22, optional=True, ) + crash_loop_back_off: CrashLoopBackOffConfig = proto.Field( + proto.MESSAGE, + number=24, + message=CrashLoopBackOffConfig, + ) shutdown_grace_period_seconds: int = proto.Field( proto.INT32, number=26, @@ -1591,18 +1747,25 @@ class NodeConfig(proto.Message): https://cloud.google.com/kubernetes-engine/docs/concepts/node-images for available image types. labels (MutableMapping[str, str]): - The map of Kubernetes labels (key/value - pairs) to be applied to each node. These will - added in addition to any default label(s) that - Kubernetes may apply to the node. - In case of conflict in label keys, the applied - set may differ depending on the Kubernetes - version -- it's best to assume the behavior is - undefined and conflicts should be avoided. - For more information, including usage and the - valid values, see: - - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + The Kubernetes labels (key/value pairs) to apply to each + node. 
The values in this field are added to the set of + default labels Kubernetes applies to nodes. + + This field has the following restrictions: + + - Labels must use a valid Kubernetes syntax and character + set, as defined in + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set. + - This field supports up to 1,024 total characters in a + single request. + + Depending on the Kubernetes version, keys in this field + might conflict with the keys of the default labels, which + might change which of your labels are applied to the nodes. + Assume that the behavior is unpredictable and avoid label + key conflicts. For more information about the default + labels, see: + https://kubernetes.io/docs/reference/labels-annotations-taints/ local_ssd_count (int): The number of local SSD disks to be attached to the node. @@ -1774,6 +1937,11 @@ class NodeConfig(proto.Message): underutilized nodes. If not set, nodes are scaled down by default behavior, i.e. according to the chosen autoscaling profile. + taint_config (google.cloud.container_v1beta1.types.TaintConfig): + Optional. The taint configuration for the + node pool. + + This field is a member of `oneof`_ ``_taint_config``. """ class LocalSsdEncryptionMode(proto.Enum): @@ -2056,6 +2224,55 @@ class EffectiveCgroupMode(proto.Enum): number=60, message=duration_pb2.Duration, ) + taint_config: "TaintConfig" = proto.Field( + proto.MESSAGE, + number=62, + optional=True, + message="TaintConfig", + ) + + +class TaintConfig(proto.Message): + r"""TaintConfig contains the configuration for the taints of the + node pool. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + architecture_taint_behavior (google.cloud.container_v1beta1.types.TaintConfig.ArchitectureTaintBehavior): + Optional. Controls architecture tainting + behavior. + + This field is a member of `oneof`_ ``_architecture_taint_behavior``. 
+ """ + + class ArchitectureTaintBehavior(proto.Enum): + r"""Controls architecture tainting behavior for a node pool. + New values may be added in the future. + + Values: + ARCHITECTURE_TAINT_BEHAVIOR_UNSPECIFIED (0): + Specifies that the behavior is unspecified, + defaults to ARM. + NONE (1): + Disables default architecture taints on the + node pool. + ARM (2): + Taints all the nodes in the node pool with + the default ARM taint. + """ + + ARCHITECTURE_TAINT_BEHAVIOR_UNSPECIFIED = 0 + NONE = 1 + ARM = 2 + + architecture_taint_behavior: ArchitectureTaintBehavior = proto.Field( + proto.ENUM, + number=2, + optional=True, + enum=ArchitectureTaintBehavior, + ) class AdvancedMachineFeatures(proto.Message): @@ -2186,7 +2403,7 @@ class NodeNetworkConfig(proto.Message): This field is a member of `oneof`_ ``_network_performance_config``. pod_cidr_overprovision_config (google.cloud.container_v1beta1.types.PodCIDROverprovisionConfig): [PRIVATE FIELD] Pod CIDR size overprovisioning config for - the nodepool. + the node pool. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is rounded off to @@ -2212,20 +2429,21 @@ class NodeNetworkConfig(proto.Message): The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes\ *numZones*\ podIPsPerNode. subnetwork (str): - Optional. The subnetwork name/path for the - node pool. Format: + Optional. The subnetwork name/path for the node pool. + Format: projects/{project}/regions/{region}/subnetworks/{subnetwork} - If the cluster is associated with multiple - subnetworks, the subnetwork can be either: - - 1. A user supplied subnetwork name/full path - during node pool creation. Example1: - my-subnet - Example2: - projects/gke-project/regions/us-central1/subnetworks/my-subnet - 2. A subnetwork path picked based on the IP - utilization during node pool creation and - is immutable. 
+ If the cluster is associated with multiple subnetworks, the + subnetwork can be either: + + - A user supplied subnetwork name during node pool creation + (e.g., ``my-subnet``). The name must be between 1 and 63 + characters long, start with a letter, contain only + letters, numbers, and hyphens, and end with a letter or a + number. + - A full subnetwork path during node pool creation, such as + ``projects/gke-project/regions/us-central1/subnetworks/my-subnet`` + - A subnetwork path picked based on the IP utilization + during node pool creation and is immutable. network_tier_config (google.cloud.container_v1beta1.types.NetworkTierConfig): Output only. The network tier configuration for the node pool inherits from the @@ -2774,13 +2992,11 @@ class CertificateAuthorityDomainConfig(proto.Message): Attributes: fqdns (MutableSequence[str]): - List of fully qualified domain names (FQDN). - Specifying port is supported. - Wildcards are NOT supported. - Examples: + List of fully qualified domain names (FQDN). Specifying port + is supported. Wildcards are NOT supported. Examples: - - my.customdomain.com - - 10.0.1.2:5000 + - ``my.customdomain.com`` + - ``10.0.1.2:5000`` gcp_secret_manager_certificate_config (google.cloud.container_v1beta1.types.ContainerdConfig.PrivateRegistryAccessConfig.CertificateAuthorityDomainConfig.GCPSecretManagerCertificateConfig): Secret Manager certificate configuration. @@ -2848,18 +3064,15 @@ class RegistryHostConfig(proto.Message): Attributes: server (str): - Defines the host name of the registry server, - which will be used to create configuration file - as /etc/containerd/hosts.d//hosts.toml. - It supports fully qualified domain names (FQDN) - and IP addresses: - - Specifying port is supported. - Wildcards are NOT supported. - Examples: - - - my.customdomain.com - - 10.0.1.2:5000 + Defines the host name of the registry server, which will be + used to create configuration file as + /etc/containerd/hosts.d//hosts.toml. 
It supports fully + qualified domain names (FQDN) and IP addresses: Specifying + port is supported, while scheme and path are NOT supported. + Wildcards are NOT supported. Examples: + + - ``my.customdomain.com`` + - ``10.0.1.2:5000`` hosts (MutableSequence[google.cloud.container_v1beta1.types.ContainerdConfig.RegistryHostConfig.HostConfig]): HostConfig configures a list of host-specific configurations for the server. @@ -2961,16 +3174,14 @@ class HostConfig(proto.Message): Attributes: host (str): - Host configures the registry host/mirror. - It supports fully qualified domain names (FQDN) - and IP addresses: - - Specifying port is supported. - Wildcards are NOT supported. - Examples: - - - my.customdomain.com - - 10.0.1.2:5000 + Host configures the registry host/mirror. It supports fully + qualified domain names (FQDNs) and IP addresses. Specifying + scheme, port or path is supported. Scheme can only be http + or https. Wildcards are NOT supported. Examples: + + - ``my.customdomain.com`` + - ``https://my.customdomain.com/path`` + - ``10.0.1.2:5000`` capabilities (MutableSequence[google.cloud.container_v1beta1.types.ContainerdConfig.RegistryHostConfig.HostCapability]): Capabilities represent the capabilities of the registry host, specifying what operations a @@ -3465,9 +3676,17 @@ class AddonsConfig(proto.Message): Configuration for the Lustre CSI driver. pod_snapshot_config (google.cloud.container_v1beta1.types.PodSnapshotConfig): Configuration for the Pod Snapshot feature. + slurm_operator_config (google.cloud.container_v1beta1.types.SlurmOperatorConfig): + Configuration for the Slurm Operator. slice_controller_config (google.cloud.container_v1beta1.types.SliceControllerConfig): Optional. Configuration for the slice controller add-on. + agent_sandbox_config (google.cloud.container_v1beta1.types.AgentSandboxConfig): + Optional. Configuration for the AgentSandbox + addon. + node_readiness_config (google.cloud.container_v1beta1.types.NodeReadinessConfig): + Optional. 
Configuration for + NodeReadinessController add-on. """ http_load_balancing: "HttpLoadBalancing" = proto.Field( @@ -3567,11 +3786,26 @@ class AddonsConfig(proto.Message): number=24, message="PodSnapshotConfig", ) + slurm_operator_config: "SlurmOperatorConfig" = proto.Field( + proto.MESSAGE, + number=25, + message="SlurmOperatorConfig", + ) slice_controller_config: "SliceControllerConfig" = proto.Field( proto.MESSAGE, number=26, message="SliceControllerConfig", ) + agent_sandbox_config: "AgentSandboxConfig" = proto.Field( + proto.MESSAGE, + number=28, + message="AgentSandboxConfig", + ) + node_readiness_config: "NodeReadinessConfig" = proto.Field( + proto.MESSAGE, + number=29, + message="NodeReadinessConfig", + ) class HttpLoadBalancing(proto.Message): @@ -3813,6 +4047,14 @@ class LustreCsiDriverConfig(proto.Message): longer required as of GKE node version 1.33.2-gke.4655000, unless you are connecting to a Lustre instance that has the ``gke-support-enabled`` flag. + disable_multi_nic (bool): + When set to true, this disables multi-NIC + support for the Lustre CSI driver. + By default, GKE enables multi-NIC support, which + allows the Lustre CSI driver to automatically + detect and configure all suitable network + interfaces on a node to maximize I/O performance + for demanding workloads. """ enabled: bool = proto.Field( @@ -3823,6 +4065,55 @@ class LustreCsiDriverConfig(proto.Message): proto.BOOL, number=3, ) + disable_multi_nic: bool = proto.Field( + proto.BOOL, + number=4, + ) + + +class SlurmOperatorConfig(proto.Message): + r"""Configuration for the Slurm Operator. + + Attributes: + enabled (bool): + Whether the Slurm Operator is enabled in the + cluster. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class AgentSandboxConfig(proto.Message): + r"""Configuration for the AgentSandbox addon. + + Attributes: + enabled (bool): + Optional. Whether AgentSandbox is enabled for + this cluster. 
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class NodeReadinessConfig(proto.Message): + r"""Configuration for the GKE Node Readiness Controller. + + Attributes: + enabled (bool): + Optional. Whether the GKE Node Readiness + Controller is enabled for this cluster. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) class SliceControllerConfig(proto.Message): @@ -4978,6 +5269,10 @@ class Cluster(proto.Message): The rollback safe upgrade information of the cluster. This field is used when user manually triggers a rollback safe upgrade. + node_pool_upgrade_concurrency_config (google.cloud.container_v1beta1.types.NodePoolUpgradeConcurrencyConfig): + The node pool upgrade concurrency config of + the cluster. This field is used for auto + upgrade. current_node_version (str): Output only. Deprecated, use `NodePool.version `__ @@ -5078,8 +5373,8 @@ class Cluster(proto.Message): fleet (google.cloud.container_v1beta1.types.Fleet): Fleet information for the cluster. security_posture_config (google.cloud.container_v1beta1.types.SecurityPostureConfig): - Enable/Disable Security Posture API features - for the cluster. + Optional. Enable/Disable Security Posture API + features for the cluster. control_plane_endpoints_config (google.cloud.container_v1beta1.types.ControlPlaneEndpointsConfig): Configuration for all cluster's control plane endpoints. @@ -5091,8 +5386,12 @@ class Cluster(proto.Message): secret_manager_config (google.cloud.container_v1beta1.types.SecretManagerConfig): Secret CSI driver configuration. compliance_posture_config (google.cloud.container_v1beta1.types.CompliancePostureConfig): - Enable/Disable Compliance Posture features - for the cluster. + Optional. Deprecated: Compliance Posture is + no longer supported. For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + Enable/Disable Compliance Posture features for + the cluster. 
satisfies_pzs (bool): Output only. Reserved for future use. @@ -5117,12 +5416,24 @@ class Cluster(proto.Message): anonymous_authentication_config (google.cloud.container_v1beta1.types.AnonymousAuthenticationConfig): Configuration for limiting anonymous access to all endpoints except the health checks. + schedule_upgrade_config (google.cloud.container_v1beta1.types.ScheduleUpgradeConfig): + Optional. Configuration for scheduled + upgrades. secret_sync_config (google.cloud.container_v1beta1.types.SecretSyncConfig): Configuration for sync Secret Manager secrets as k8s secrets. managed_opentelemetry_config (google.cloud.container_v1beta1.types.ManagedOpenTelemetryConfig): Configuration for Managed OpenTelemetry pipeline. + control_plane_egress (google.cloud.container_v1beta1.types.ControlPlaneEgress): + Configuration for control plane egress + control. + managed_machine_learning_diagnostics_config (google.cloud.container_v1beta1.types.ManagedMachineLearningDiagnosticsConfig): + Configuration for managed machine learning + diagnostics. + node_creation_config (google.cloud.container_v1beta1.types.NodeCreationConfig): + Optional. Configuration for Node Creation + Mode. 
""" class Status(proto.Enum): @@ -5407,6 +5718,13 @@ class Status(proto.Enum): number=170, message="RollbackSafeUpgrade", ) + node_pool_upgrade_concurrency_config: "NodePoolUpgradeConcurrencyConfig" = ( + proto.Field( + proto.MESSAGE, + number=172, + message="NodePoolUpgradeConcurrencyConfig", + ) + ) current_node_version: str = proto.Field( proto.STRING, number=105, @@ -5578,6 +5896,11 @@ class Status(proto.Enum): number=164, message="AnonymousAuthenticationConfig", ) + schedule_upgrade_config: "ScheduleUpgradeConfig" = proto.Field( + proto.MESSAGE, + number=165, + message="ScheduleUpgradeConfig", + ) secret_sync_config: "SecretSyncConfig" = proto.Field( proto.MESSAGE, number=166, @@ -5588,6 +5911,88 @@ class Status(proto.Enum): number=168, message="ManagedOpenTelemetryConfig", ) + control_plane_egress: "ControlPlaneEgress" = proto.Field( + proto.MESSAGE, + number=169, + message="ControlPlaneEgress", + ) + managed_machine_learning_diagnostics_config: "ManagedMachineLearningDiagnosticsConfig" = proto.Field( + proto.MESSAGE, + number=171, + message="ManagedMachineLearningDiagnosticsConfig", + ) + node_creation_config: "NodeCreationConfig" = proto.Field( + proto.MESSAGE, + number=174, + message="NodeCreationConfig", + ) + + +class NodeCreationConfig(proto.Message): + r"""NodeCreationConfig defines the settings of node creation + mode. + + Attributes: + node_creation_mode (google.cloud.container_v1beta1.types.NodeCreationConfig.Mode): + The mode of node creation. + """ + + class Mode(proto.Enum): + r"""The mode of node creation. + + Values: + MODE_UNSPECIFIED (0): + When no user input is provided. + VIA_KUBELET (1): + Kubelet registers itself. + VIA_CONTROL_PLANE (2): + gcp-controller-manager automatically creates + the node object after CSR approval. 
+ """ + + MODE_UNSPECIFIED = 0 + VIA_KUBELET = 1 + VIA_CONTROL_PLANE = 2 + + node_creation_mode: Mode = proto.Field( + proto.ENUM, + number=1, + enum=Mode, + ) + + +class ControlPlaneEgress(proto.Message): + r"""ControlPlaneEgress defines the settings needed to enable + control plane egress control. + + Attributes: + mode (google.cloud.container_v1beta1.types.ControlPlaneEgress.Mode): + Defines the mode of control plane egress. + """ + + class Mode(proto.Enum): + r"""Mode defines the mode of control plane egress. + + Values: + MODE_UNSPECIFIED (0): + Default value not specified. + VIA_CONTROL_PLANE (1): + Control plane has public IP and no + restriction on egress. + NONE (2): + No public IP on control plane and only + internal allowlisted egress. + """ + + MODE_UNSPECIFIED = 0 + VIA_CONTROL_PLANE = 1 + NONE = 2 + + mode: Mode = proto.Field( + proto.ENUM, + number=1, + enum=Mode, + ) class RBACBindingConfig(proto.Message): @@ -5746,7 +6151,11 @@ class Mode(proto.Enum): class CompliancePostureConfig(proto.Message): - r"""CompliancePostureConfig defines the settings needed to + r"""Deprecated: Compliance Posture is no longer supported. + For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + CompliancePostureConfig defines the settings needed to enable/disable features for the Compliance Posture. @@ -5965,6 +6374,11 @@ class Mode(proto.Enum): Applies Security Posture features on the cluster. ENTERPRISE (3): + Deprecated: Security Posture Enterprise + features are no longer supported. For more + details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + Applies the Security Posture off cluster Enterprise level features. """ @@ -5985,6 +6399,10 @@ class VulnerabilityMode(proto.Enum): Disables vulnerability scanning on the cluster. VULNERABILITY_BASIC (2): + Deprecated: Basic vulnerability scanning is + no longer supported. 
For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + Applies basic vulnerability scanning on the cluster. VULNERABILITY_ENTERPRISE (3): @@ -6441,8 +6859,12 @@ class ClusterUpdate(proto.Message): This field is a member of `oneof`_ ``_desired_secret_manager_config``. desired_compliance_posture_config (google.cloud.container_v1beta1.types.CompliancePostureConfig): - Enable/Disable Compliance Posture features - for the cluster. + Deprecated: Compliance Posture is no longer + supported. For more details, see + https://cloud.google.com/kubernetes-engine/docs/deprecations/posture-management-deprecation. + + Enable/Disable Compliance Posture features for + the cluster. This field is a member of `oneof`_ ``_desired_compliance_posture_config``. desired_node_kubelet_config (google.cloud.container_v1beta1.types.NodeKubeletConfig): @@ -6498,18 +6920,36 @@ class ClusterUpdate(proto.Message): desired_network_tier_config (google.cloud.container_v1beta1.types.NetworkTierConfig): The desired network tier configuration for the cluster. + desired_schedule_upgrade_config (google.cloud.container_v1beta1.types.ScheduleUpgradeConfig): + Optional. The desired scheduled upgrades + configuration for the cluster. desired_secret_sync_config (google.cloud.container_v1beta1.types.SecretSyncConfig): Configuration for sync Secret Manager secrets as k8s secrets. desired_privileged_admission_config (google.cloud.container_v1beta1.types.PrivilegedAdmissionConfig): The desired privileged admission config for the cluster. + desired_control_plane_egress (google.cloud.container_v1beta1.types.ControlPlaneEgress): + The desired control plane egress control + config for the cluster. desired_rollback_safe_upgrade (google.cloud.container_v1beta1.types.RollbackSafeUpgrade): The desired rollback safe upgrade configuration. 
+ desired_node_pool_upgrade_concurrency_config (google.cloud.container_v1beta1.types.NodePoolUpgradeConcurrencyConfig): + The desired node pool upgrade concurrency + configuration. desired_managed_opentelemetry_config (google.cloud.container_v1beta1.types.ManagedOpenTelemetryConfig): The desired managed open telemetry configuration. + desired_autopilot_cluster_policy_config (google.cloud.container_v1beta1.types.ClusterPolicyConfig): + The desired autopilot cluster policies that + are to be enforced in the cluster. + desired_managed_machine_learning_diagnostics_config (google.cloud.container_v1beta1.types.ManagedMachineLearningDiagnosticsConfig): + The desired managed machine learning + diagnostics configuration. + desired_node_creation_config (google.cloud.container_v1beta1.types.NodeCreationConfig): + Optional. The desired NodeCreationConfig for + the cluster. """ desired_node_version: str = proto.Field( @@ -6922,6 +7362,11 @@ class ClusterUpdate(proto.Message): number=155, message="NetworkTierConfig", ) + desired_schedule_upgrade_config: "ScheduleUpgradeConfig" = proto.Field( + proto.MESSAGE, + number=157, + message="ScheduleUpgradeConfig", + ) desired_secret_sync_config: "SecretSyncConfig" = proto.Field( proto.MESSAGE, number=158, @@ -6932,16 +7377,43 @@ number=159, message="PrivilegedAdmissionConfig", ) + desired_control_plane_egress: "ControlPlaneEgress" = proto.Field( + proto.MESSAGE, + number=160, + message="ControlPlaneEgress", + ) desired_rollback_safe_upgrade: "RollbackSafeUpgrade" = proto.Field( proto.MESSAGE, number=161, message="RollbackSafeUpgrade", ) + desired_node_pool_upgrade_concurrency_config: "NodePoolUpgradeConcurrencyConfig" = ( + proto.Field( + proto.MESSAGE, + number=167, + message="NodePoolUpgradeConcurrencyConfig", + ) + ) desired_managed_opentelemetry_config: "ManagedOpenTelemetryConfig" = proto.Field( proto.MESSAGE, number=163, message="ManagedOpenTelemetryConfig", ) + 
desired_autopilot_cluster_policy_config: "ClusterPolicyConfig" = proto.Field( + proto.MESSAGE, + number=164, + message="ClusterPolicyConfig", + ) + desired_managed_machine_learning_diagnostics_config: "ManagedMachineLearningDiagnosticsConfig" = proto.Field( + proto.MESSAGE, + number=166, + message="ManagedMachineLearningDiagnosticsConfig", + ) + desired_node_creation_config: "NodeCreationConfig" = proto.Field( + proto.MESSAGE, + number=171, + message="NodeCreationConfig", + ) class AdditionalPodRangesConfig(proto.Message): @@ -6991,7 +7463,7 @@ class AdditionalIPRangesConfig(proto.Message): class Status(proto.Enum): r"""Additional subnet with DRAINING status will not be selected during new node pool creation. To undrain the draining status, update the - cluster to set the sunbet to ACTIVE status. To remove the additional + cluster to set the subnet to ACTIVE status. To remove the additional subnet, use the update cluster API to remove the subnet from the desired_additional_ip_ranges list. IP ranges can be removed regardless of its status, as long as no node pools are using them. @@ -7818,6 +8290,8 @@ class UpdateNodePoolRequest(proto.Message): underutilized nodes. If not set, nodes are scaled down by default behavior, i.e. according to the chosen autoscaling profile. + taint_config (google.cloud.container_v1beta1.types.TaintConfig): + The taint configuration for the node pool. """ project_id: str = proto.Field( @@ -7992,6 +8466,11 @@ class UpdateNodePoolRequest(proto.Message): number=49, message=duration_pb2.Duration, ) + taint_config: "TaintConfig" = proto.Field( + proto.MESSAGE, + number=51, + message="TaintConfig", + ) class SetNodePoolAutoscalingRequest(proto.Message): @@ -9301,6 +9780,9 @@ class NodePool(proto.Message): node_drain_config (google.cloud.container_v1beta1.types.NodePool.NodeDrainConfig): Specifies the node drain configuration for this node pool. 
+ maintenance_policy (google.cloud.container_v1beta1.types.NodePool.NodePoolMaintenancePolicy): + Optional. Specifies the maintenance policy + for the node pool. """ class Status(proto.Enum): @@ -9590,7 +10072,7 @@ class QueuedProvisioning(proto.Message): Attributes: enabled (bool): - Denotes that this nodepool is QRM specific, + Denotes that this node pool is QRM specific, meaning nodes can be only obtained through queuing via the Cluster Autoscaler ProvisioningRequest API. @@ -9603,12 +10085,22 @@ class QueuedProvisioning(proto.Message): class NodeDrainConfig(proto.Message): r"""NodeDrainConfig contains the node drain related - configurations for this nodepool. + configurations for this node pool. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + pdb_timeout_duration (google.protobuf.duration_pb2.Duration): + The duration of the PDB timeout period for + node drain. + + This field is a member of `oneof`_ ``_pdb_timeout_duration``. + grace_termination_duration (google.protobuf.duration_pb2.Duration): + The duration of the grace termination period + for node drain. + + This field is a member of `oneof`_ ``_grace_termination_duration``. respect_pdb_during_node_pool_deletion (bool): Whether to respect PDB during node pool deletion. @@ -9616,12 +10108,75 @@ class NodeDrainConfig(proto.Message): This field is a member of `oneof`_ ``_respect_pdb_during_node_pool_deletion``. """ + pdb_timeout_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=duration_pb2.Duration, + ) + grace_termination_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=duration_pb2.Duration, + ) respect_pdb_during_node_pool_deletion: bool = proto.Field( proto.BOOL, number=3, optional=True, ) + class ExclusionUntilEndOfSupport(proto.Message): + r"""Defines the maintenance exclusion for the node pool. 
+ + Attributes: + enabled (bool): + Optional. Indicates whether the exclusion is + enabled. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The start time of the + maintenance exclusion. It is output only. It is + the exclusion creation time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The end time of the maintenance + exclusion. It is output only. It is the cluster + control plane version's end of support time, or + end of extended support time when the cluster is + on extended support channel. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + class NodePoolMaintenancePolicy(proto.Message): + r"""Defines the maintenance policy for the node pool. + + Attributes: + exclusion_until_end_of_support (google.cloud.container_v1beta1.types.NodePool.ExclusionUntilEndOfSupport): + Optional. The exclusion until end of support + for the node pool. + """ + + exclusion_until_end_of_support: "NodePool.ExclusionUntilEndOfSupport" = ( + proto.Field( + proto.MESSAGE, + number=1, + message="NodePool.ExclusionUntilEndOfSupport", + ) + ) + name: str = proto.Field( proto.STRING, number=1, @@ -9723,6 +10278,11 @@ class NodeDrainConfig(proto.Message): number=116, message=NodeDrainConfig, ) + maintenance_policy: NodePoolMaintenancePolicy = proto.Field( + proto.MESSAGE, + number=118, + message=NodePoolMaintenancePolicy, + ) class NodeManagement(proto.Message): @@ -9798,6 +10358,9 @@ class MaintenancePolicy(proto.Message): a ``get()`` request to the cluster to get the current resource version and include it with requests to set the policy. + disruption_budget (google.cloud.container_v1beta1.types.DisruptionBudget): + Optional. 
The upgrade disruption budget for + the cluster control plane. """ window: "MaintenanceWindow" = proto.Field( @@ -9809,6 +10372,52 @@ class MaintenancePolicy(proto.Message): proto.STRING, number=3, ) + disruption_budget: "DisruptionBudget" = proto.Field( + proto.MESSAGE, + number=4, + message="DisruptionBudget", + ) + + +class DisruptionBudget(proto.Message): + r"""DisruptionBudget defines the upgrade disruption budget for + the cluster control plane. + + Attributes: + minor_version_disruption_interval (google.protobuf.duration_pb2.Duration): + Optional. The minimum duration between two + minor version upgrades of the control plane. + patch_version_disruption_interval (google.protobuf.duration_pb2.Duration): + Optional. The minimum duration between two + patch version upgrades of the control plane. + last_minor_version_disruption_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The last time a minor version + upgrade was performed on the control plane. + last_disruption_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The last time a disruption was + performed on the control plane. + """ + + minor_version_disruption_interval: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + patch_version_disruption_interval: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + last_minor_version_disruption_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + last_disruption_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) class MaintenanceWindow(proto.Message): @@ -9835,6 +10444,15 @@ class MaintenanceWindow(proto.Message): maintenance windows are set, maintenance can occur at any time. + This field is a member of `oneof`_ ``policy``. 
+ recurring_maintenance_window (google.cloud.container_v1beta1.types.RecurringMaintenanceWindow): + RecurringMaintenanceWindow specifies some + number of recurring time periods for maintenance + to occur. The time windows may be overlapping. + If no maintenance windows are set, maintenance + can occur at any time. Alternative to + RecurringWindow, with renamed fields. + This field is a member of `oneof`_ ``policy``. maintenance_exclusions (MutableMapping[str, google.cloud.container_v1beta1.types.TimeWindow]): Exceptions to maintenance window. @@ -9854,6 +10472,12 @@ class MaintenanceWindow(proto.Message): oneof="policy", message="RecurringTimeWindow", ) + recurring_maintenance_window: "RecurringMaintenanceWindow" = proto.Field( + proto.MESSAGE, + number=5, + oneof="policy", + message="RecurringMaintenanceWindow", + ) maintenance_exclusions: MutableMapping[str, "TimeWindow"] = proto.MapField( proto.STRING, proto.MESSAGE, @@ -10018,6 +10642,68 @@ class RecurringTimeWindow(proto.Message): ) +class RecurringMaintenanceWindow(proto.Message): + r"""Represents an arbitrary window of time that recurs. + Alternative to RecurringTimeWindow, with renamed fields. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + delay_until (google.type.date_pb2.Date): + Optional. Windows will not be scheduled + before that day. Depending on the recurrence, + this may be the date the first window appears. + Days are measured in the UTC timezone. This + setting must be used when INTERVAL>1 or + FREQ=WEEKLY/MONTHLY and no BYDAY specified. + + This field is a member of `oneof`_ ``_delay_until``. + window_start_time (google.type.timeofday_pb2.TimeOfDay): + Required. Start time of the window on days + that it is scheduled, assuming UTC timezone. + window_duration (google.protobuf.duration_pb2.Duration): + Required. Duration of the window. + recurrence (str): + Required. 
An RRULE + (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for + how this window reccurs. + + For example, to have something repeat every weekday, you'd + use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`` + + To repeat some window daily (equivalent to the + DailyMaintenanceWindow): ``FREQ=DAILY`` + + For the first weekend of every month: + ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`` + + The FREQ values of HOURLY, MINUTELY, and SECONDLY are not + supported. + """ + + delay_until: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=date_pb2.Date, + ) + window_start_time: timeofday_pb2.TimeOfDay = proto.Field( + proto.MESSAGE, + number=2, + message=timeofday_pb2.TimeOfDay, + ) + window_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + recurrence: str = proto.Field( + proto.STRING, + number=4, + ) + + class DailyMaintenanceWindow(proto.Message): r"""Time window specified for daily maintenance operations. @@ -10308,10 +10994,13 @@ class AutopilotGeneralProfile(proto.Enum): Use default configuration. NO_PERFORMANCE (1): Avoid extra IP consumption. + NONE (2): + Use default configuration. """ AUTOPILOT_GENERAL_PROFILE_UNSPECIFIED = 0 NO_PERFORMANCE = 1 + NONE = 2 enable_node_autoprovisioning: bool = proto.Field( proto.BOOL, @@ -10548,8 +11237,8 @@ class NodePoolAutoscaling(proto.Message): autoprovisioned (bool): Can this node pool be deleted automatically. location_policy (google.cloud.container_v1beta1.types.NodePoolAutoscaling.LocationPolicy): - Location policy used when scaling up a - nodepool. + Location policy used when scaling up a node + pool. total_min_node_count (int): Minimum number of nodes in the node pool. Must be greater than or equal to 0 and less than or equal to @@ -10565,7 +11254,7 @@ class NodePoolAutoscaling(proto.Message): class LocationPolicy(proto.Enum): r"""Location policy specifies how zones are picked when scaling - up the nodepool. + up the node pool. 
Values: LOCATION_POLICY_UNSPECIFIED (0): @@ -12124,11 +12813,17 @@ class State(proto.Enum): Secrets in etcd are stored in plain text (at etcd level) - this is unrelated to Compute Engine level full disk encryption. + ALL_OBJECTS_ENCRYPTION_ENABLED (3): + Encryption of all objects in the storage is + enabled. There is no guarantee that all objects + in the storage are encrypted, but eventually + they will be. """ UNKNOWN = 0 ENCRYPTED = 1 DECRYPTED = 2 + ALL_OBJECTS_ENCRYPTION_ENABLED = 3 class CurrentState(proto.Enum): r"""Current State of etcd encryption. @@ -12155,6 +12850,17 @@ class CurrentState(proto.Enum): CURRENT_STATE_DECRYPTION_ERROR (6): De-crypting Secrets to plain text in etcd encountered an error. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ENABLED (8): + Encryption of all objects in the storage is + enabled. It does not guarantee that all objects + in the storage are encrypted, but eventually + they will be. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_PENDING (9): + Enablement of the encryption of all objects + in storage is pending. + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ERROR (10): + Enabling encryption of all objects in storage + encountered an error. """ CURRENT_STATE_UNSPECIFIED = 0 @@ -12164,6 +12870,9 @@ class CurrentState(proto.Enum): CURRENT_STATE_ENCRYPTION_ERROR = 4 CURRENT_STATE_DECRYPTION_PENDING = 5 CURRENT_STATE_DECRYPTION_ERROR = 6 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ENABLED = 8 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_PENDING = 9 + CURRENT_STATE_ALL_OBJECTS_ENCRYPTION_ERROR = 10 class OperationError(proto.Message): r"""OperationError records errors seen from CloudKMS keys @@ -12862,6 +13571,9 @@ class Autopilot(proto.Message): PrivilegedAdmissionConfig is the configuration related to privileged admission control. + cluster_policy_config (google.cloud.container_v1beta1.types.ClusterPolicyConfig): + ClusterPolicyConfig denotes cluster level + policies that are enforced for the cluster. 
""" enabled: bool = proto.Field( @@ -12883,6 +13595,63 @@ class Autopilot(proto.Message): number=4, message="PrivilegedAdmissionConfig", ) + cluster_policy_config: "ClusterPolicyConfig" = proto.Field( + proto.MESSAGE, + number=5, + message="ClusterPolicyConfig", + ) + + +class ClusterPolicyConfig(proto.Message): + r"""ClusterPolicyConfig stores the configuration for cluster wide + policies. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + no_system_mutation (bool): + Denotes that preventing creation and mutation + of resources in GKE managed namespaces and + cluster-scoped GKE managed resources . + + This field is a member of `oneof`_ ``_no_system_mutation``. + no_system_impersonation (bool): + Denotes preventing impersonation and CSRs for + GKE System users. + + This field is a member of `oneof`_ ``_no_system_impersonation``. + no_unsafe_webhooks (bool): + Denotes preventing unsafe webhooks. + + This field is a member of `oneof`_ ``_no_unsafe_webhooks``. + no_standard_node_pools (bool): + Denotes preventing standard node pools and + requiring only autopilot node pools. + + This field is a member of `oneof`_ ``_no_standard_node_pools``. + """ + + no_system_mutation: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + no_system_impersonation: bool = proto.Field( + proto.BOOL, + number=2, + optional=True, + ) + no_unsafe_webhooks: bool = proto.Field( + proto.BOOL, + number=3, + optional=True, + ) + no_standard_node_pools: bool = proto.Field( + proto.BOOL, + number=5, + optional=True, + ) class PrivilegedAdmissionConfig(proto.Message): @@ -12944,6 +13713,28 @@ class WorkloadPolicyConfig(proto.Message): ) +class NodePoolUpgradeConcurrencyConfig(proto.Message): + r"""NodePoolUpgradeConcurrencyConfig is the configuration for the + node pool auto upgrade concurrency. + + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + max_count (int): + If set, no more than max_count node pools can be upgraded + concurrently. + + This field is a member of `oneof`_ ``concurrency``. + """ + + max_count: int = proto.Field( + proto.INT64, + number=1, + oneof="concurrency", + ) + + class NotificationConfig(proto.Message): r"""NotificationConfig is the configuration of notifications. @@ -13196,6 +13987,9 @@ class State(proto.Enum): Values: STATE_UNSPECIFIED (0): STATE_UNSPECIFIED indicates the state is unspecified. + SCHEDULED (1): + SCHEDULED indicates the upgrade was + scheduled. STARTED (3): STARTED indicates the upgrade has started. SUCCEEDED (4): @@ -13208,6 +14002,7 @@ class State(proto.Enum): """ STATE_UNSPECIFIED = 0 + SCHEDULED = 1 STARTED = 3 SUCCEEDED = 4 FAILED = 5 @@ -13741,7 +14536,7 @@ class RayClusterMonitoringConfig(proto.Message): class NodePoolLoggingConfig(proto.Message): r"""NodePoolLoggingConfig specifies logging configuration for - nodepools. + node pools. Attributes: variant_config (google.cloud.container_v1beta1.types.LoggingVariantConfig): @@ -14243,7 +15038,8 @@ class RotationConfig(proto.Message): class BootDisk(proto.Message): - r"""BootDisk specifies the boot disk configuration for nodepools. + r"""BootDisk specifies the boot disk configuration for node + pools. Attributes: disk_type (str): @@ -14641,12 +15437,12 @@ class StartType(proto.Enum): class FetchNodePoolUpgradeInfoRequest(proto.Message): r"""FetchNodePoolUpgradeInfoRequest fetches the upgrade - information of a nodepool. + information of a node pool. Attributes: name (str): - Required. The name (project, location, cluster, nodepool) of - the nodepool to get. Specified in the format + Required. The name (project, location, cluster, node pool) + of the node pool to get. Specified in the format ``projects/*/locations/*/clusters/*/nodePools/*`` or ``projects/*/zones/*/clusters/*/nodePools/*``. 
version (str): @@ -14666,7 +15462,7 @@ class FetchNodePoolUpgradeInfoRequest(proto.Message): class NodePoolUpgradeInfo(proto.Message): r"""NodePoolUpgradeInfo contains the upgrade information of a - nodepool. + node pool. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -14689,13 +15485,13 @@ class NodePoolUpgradeInfo(proto.Message): upgrade_details (MutableSequence[google.cloud.container_v1beta1.types.UpgradeDetails]): The list of past auto upgrades. end_of_standard_support_timestamp (str): - The nodepool's current minor version's end of - standard support timestamp. + The node pool's current minor version's end + of standard support timestamp. This field is a member of `oneof`_ ``_end_of_standard_support_timestamp``. end_of_extended_support_timestamp (str): - The nodepool's current minor version's end of - extended support timestamp. + The node pool's current minor version's end + of extended support timestamp. This field is a member of `oneof`_ ``_end_of_extended_support_timestamp``. """ @@ -14786,6 +15582,21 @@ class AutoUpgradePausedReason(proto.Enum): ) +class ScheduleUpgradeConfig(proto.Message): + r"""Configuration for scheduled upgrades on the cluster. + + Attributes: + enabled (bool): + Optional. Whether or not scheduled upgrades + are enabled. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + class GkeAutoUpgradeConfig(proto.Message): r"""GkeAutoUpgradeConfig is the configuration for GKE auto upgrades. @@ -14966,6 +15777,28 @@ class Scope(proto.Enum): ) +class ManagedMachineLearningDiagnosticsConfig(proto.Message): + r"""ManagedMachineLearningDiagnosticsConfig is the configuration + for the GKE Managed Machine Learning Diagnostics pipeline. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + enabled (bool): + Enable/Disable Managed Machine Learning + Diagnostics. 
+ + This field is a member of `oneof`_ ``_enabled``. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) + + class PodSnapshotConfig(proto.Message): r"""PodSnapshotConfig is the configuration for GKE Pod Snapshots feature. diff --git a/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py b/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py index bff5870faa06..78673b98a580 100644 --- a/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py +++ b/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py @@ -43,6 +43,8 @@ import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore import google.rpc.code_pb2 as code_pb2 # type: ignore import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.date_pb2 as date_pb2 # type: ignore +import google.type.timeofday_pb2 as timeofday_pb2 # type: ignore from google.api_core import ( client_options, gapic_v1, @@ -1301,7 +1303,11 @@ def test_cluster_manager_client_create_channel_credentials_file( credentials=file_creds, credentials_file=None, quota_project_id=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), scopes=None, default_host="container.googleapis.com", ssl_credentials=None, @@ -28076,7 +28082,11 @@ def test_cluster_manager_base_transport_with_credentials_file(): load_creds.assert_called_once_with( "credentials.json", scopes=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id="octopus", ) @@ -28102,7 +28112,11 @@ def 
test_cluster_manager_auth_adc(): ClusterManagerClient() adc.assert_called_once_with( scopes=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id=None, ) @@ -28122,7 +28136,11 @@ def test_cluster_manager_transport_auth_adc(transport_class): transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id="octopus", ) @@ -28175,7 +28193,11 @@ def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers) credentials=creds, credentials_file=None, quota_project_id="octopus", - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), scopes=["1", "2"], default_host="container.googleapis.com", ssl_credentials=None, diff --git a/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py index 4d3297676824..ff68944be80e 100644 --- a/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py +++ b/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py @@ -40,6 +40,8 @@ import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore import google.rpc.code_pb2 as code_pb2 # type: ignore import google.rpc.status_pb2 as status_pb2 # type: ignore +import 
google.type.date_pb2 as date_pb2 # type: ignore +import google.type.timeofday_pb2 as timeofday_pb2 # type: ignore from google.api_core import ( client_options, gapic_v1, @@ -1281,7 +1283,11 @@ def test_cluster_manager_client_create_channel_credentials_file( credentials=file_creds, credentials_file=None, quota_project_id=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), scopes=None, default_host="container.googleapis.com", ssl_credentials=None, @@ -18030,7 +18036,11 @@ def test_cluster_manager_base_transport_with_credentials_file(): load_creds.assert_called_once_with( "credentials.json", scopes=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id="octopus", ) @@ -18056,7 +18066,11 @@ def test_cluster_manager_auth_adc(): ClusterManagerClient() adc.assert_called_once_with( scopes=None, - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id=None, ) @@ -18076,7 +18090,11 @@ def test_cluster_manager_transport_auth_adc(transport_class): transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), quota_project_id="octopus", ) @@ -18128,7 +18146,11 @@ 
def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers) credentials=creds, credentials_file=None, quota_project_id="octopus", - default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/container", + "https://www.googleapis.com/auth/container.read-only", + ), scopes=["1", "2"], default_host="container.googleapis.com", ssl_credentials=None, diff --git a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics/__init__.py b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics/__init__.py index ea0eae13b9fb..aeeb9bf70d24 100644 --- a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics/__init__.py +++ b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics/__init__.py @@ -45,6 +45,7 @@ LookerGoldenQuery, LookerQuery, MatchedQuery, + QueryParameter, QueryParameterValues, UserFunctions, ) @@ -140,6 +141,7 @@ "LookerGoldenQuery", "LookerQuery", "MatchedQuery", + "QueryParameter", "QueryParameterValues", "UserFunctions", "Conversation", diff --git a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/__init__.py b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/__init__.py index 847872932d83..b27f73f8a524 100644 --- a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/__init__.py +++ b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/__init__.py @@ -46,6 +46,7 @@ LookerGoldenQuery, LookerQuery, MatchedQuery, + QueryParameter, QueryParameterValues, UserFunctions, ) @@ -268,6 +269,7 @@ def _get_version(dependency_name): "OAuthCredentials", "OperationMetadata", "PrivateLookerInstanceInfo", + "QueryParameter", "QueryParameterValues", "Schema", "SchemaMessage", diff --git 
a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/__init__.py b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/__init__.py index 5f10b2180023..58f4c10426dc 100644 --- a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/__init__.py +++ b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/__init__.py @@ -28,6 +28,7 @@ LookerGoldenQuery, LookerQuery, MatchedQuery, + QueryParameter, QueryParameterValues, UserFunctions, ) @@ -121,6 +122,7 @@ "LookerGoldenQuery", "LookerQuery", "MatchedQuery", + "QueryParameter", "QueryParameterValues", "UserFunctions", "Conversation", diff --git a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/context.py b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/context.py index af53007dfd93..ac9618e89409 100644 --- a/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/context.py +++ b/packages/google-cloud-geminidataanalytics/google/cloud/geminidataanalytics_v1/types/context.py @@ -30,6 +30,7 @@ "BigQueryRoutine", "BigQueryRoutineReference", "ExampleQuery", + "QueryParameter", "MatchedQuery", "QueryParameterValues", "LookerGoldenQuery", @@ -324,6 +325,10 @@ class ExampleQuery(proto.Message): Optional. A natural language question that a user might ask. For example: "How many orders were placed last month?". + parameters (MutableSequence[google.cloud.geminidataanalytics_v1.types.QueryParameter]): + Optional. The list of query parameters. Example: The + parameterized SQL query "SELECT \* FROM my_table WHERE id = + @id" can be matched with any value of id. 
""" sql_query: str = proto.Field( @@ -335,6 +340,45 @@ class ExampleQuery(proto.Message): proto.STRING, number=1, ) + parameters: MutableSequence["QueryParameter"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="QueryParameter", + ) + + +class QueryParameter(proto.Message): + r"""A query parameter message represents a parameter that can be + used to parameterize a SQL query. + + Attributes: + name (str): + Required. The name of the parameter reference + in the SQL query. + description (str): + Optional. The description of the parameter + that can be used by LLM to extract the parameter + value from the user question. + data_type (str): + Required. The data type of the parameter, e.g. "STRING", + "INT64", "DATE", etc. For valid values, see the `BigQuery + documentation `__. + This will be used to populate + google.cloud.bigquery.v2.QueryParameterType.type. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + data_type: str = proto.Field( + proto.STRING, + number=3, + ) class MatchedQuery(proto.Message): diff --git a/packages/google-cloud-geminidataanalytics/tests/unit/gapic/geminidataanalytics_v1/test_data_agent_service.py b/packages/google-cloud-geminidataanalytics/tests/unit/gapic/geminidataanalytics_v1/test_data_agent_service.py index 3568cb1e80ab..1bb3a564639f 100644 --- a/packages/google-cloud-geminidataanalytics/tests/unit/gapic/geminidataanalytics_v1/test_data_agent_service.py +++ b/packages/google-cloud-geminidataanalytics/tests/unit/gapic/geminidataanalytics_v1/test_data_agent_service.py @@ -9237,6 +9237,13 @@ def test_create_data_agent_rest_call_success(request_type): { "sql_query": "sql_query_value", "natural_language_question": "natural_language_question_value", + "parameters": [ + { + "name": "name_value", + "description": "description_value", + "data_type": "data_type_value", + } + ], } ], "looker_golden_queries": [ @@ -9566,6 +9573,13 @@ def 
test_create_data_agent_sync_rest_call_success(request_type): { "sql_query": "sql_query_value", "natural_language_question": "natural_language_question_value", + "parameters": [ + { + "name": "name_value", + "description": "description_value", + "data_type": "data_type_value", + } + ], } ], "looker_golden_queries": [ @@ -9910,6 +9924,13 @@ def test_update_data_agent_rest_call_success(request_type): { "sql_query": "sql_query_value", "natural_language_question": "natural_language_question_value", + "parameters": [ + { + "name": "name_value", + "description": "description_value", + "data_type": "data_type_value", + } + ], } ], "looker_golden_queries": [ @@ -10243,6 +10264,13 @@ def test_update_data_agent_sync_rest_call_success(request_type): { "sql_query": "sql_query_value", "natural_language_question": "natural_language_question_value", + "parameters": [ + { + "name": "name_value", + "description": "description_value", + "data_type": "data_type_value", + } + ], } ], "looker_golden_queries": [ diff --git a/packages/google-cloud-support/google/cloud/support/__init__.py b/packages/google-cloud-support/google/cloud/support/__init__.py index 610cc100295c..b290187ec314 100644 --- a/packages/google-cloud-support/google/cloud/support/__init__.py +++ b/packages/google-cloud-support/google/cloud/support/__init__.py @@ -35,6 +35,7 @@ from google.cloud.support_v2.types.actor import Actor from google.cloud.support_v2.types.attachment import Attachment from google.cloud.support_v2.types.attachment_service import ( + GetAttachmentRequest, ListAttachmentsRequest, ListAttachmentsResponse, ) @@ -55,6 +56,7 @@ from google.cloud.support_v2.types.comment import Comment from google.cloud.support_v2.types.comment_service import ( CreateCommentRequest, + GetCommentRequest, ListCommentsRequest, ListCommentsResponse, ) @@ -69,6 +71,7 @@ "CommentServiceAsyncClient", "Actor", "Attachment", + "GetAttachmentRequest", "ListAttachmentsRequest", "ListAttachmentsResponse", "Case", @@ -86,6 +89,7 @@ 
"UpdateCaseRequest", "Comment", "CreateCommentRequest", + "GetCommentRequest", "ListCommentsRequest", "ListCommentsResponse", "Escalation", diff --git a/packages/google-cloud-support/google/cloud/support_v2/__init__.py b/packages/google-cloud-support/google/cloud/support_v2/__init__.py index 375f01fbf34a..9649728d5e9c 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/__init__.py +++ b/packages/google-cloud-support/google/cloud/support_v2/__init__.py @@ -31,7 +31,11 @@ from .services.comment_service import CommentServiceAsyncClient, CommentServiceClient from .types.actor import Actor from .types.attachment import Attachment -from .types.attachment_service import ListAttachmentsRequest, ListAttachmentsResponse +from .types.attachment_service import ( + GetAttachmentRequest, + ListAttachmentsRequest, + ListAttachmentsResponse, +) from .types.case import Case, CaseClassification from .types.case_service import ( CloseCaseRequest, @@ -49,6 +53,7 @@ from .types.comment import Comment from .types.comment_service import ( CreateCommentRequest, + GetCommentRequest, ListCommentsRequest, ListCommentsResponse, ) @@ -154,7 +159,9 @@ def _get_version(dependency_name): "CreateCommentRequest", "EscalateCaseRequest", "Escalation", + "GetAttachmentRequest", "GetCaseRequest", + "GetCommentRequest", "ListAttachmentsRequest", "ListAttachmentsResponse", "ListCasesRequest", diff --git a/packages/google-cloud-support/google/cloud/support_v2/gapic_metadata.json b/packages/google-cloud-support/google/cloud/support_v2/gapic_metadata.json index bdb9792547f2..2dc5bec76621 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/gapic_metadata.json +++ b/packages/google-cloud-support/google/cloud/support_v2/gapic_metadata.json @@ -10,6 +10,11 @@ "grpc": { "libraryClient": "CaseAttachmentServiceClient", "rpcs": { + "GetAttachment": { + "methods": [ + "get_attachment" + ] + }, "ListAttachments": { "methods": [ "list_attachments" @@ -20,6 +25,11 @@ "grpc-async": { 
"libraryClient": "CaseAttachmentServiceAsyncClient", "rpcs": { + "GetAttachment": { + "methods": [ + "get_attachment" + ] + }, "ListAttachments": { "methods": [ "list_attachments" @@ -30,6 +40,11 @@ "rest": { "libraryClient": "CaseAttachmentServiceClient", "rpcs": { + "GetAttachment": { + "methods": [ + "get_attachment" + ] + }, "ListAttachments": { "methods": [ "list_attachments" @@ -188,6 +203,11 @@ "create_comment" ] }, + "GetComment": { + "methods": [ + "get_comment" + ] + }, "ListComments": { "methods": [ "list_comments" @@ -203,6 +223,11 @@ "create_comment" ] }, + "GetComment": { + "methods": [ + "get_comment" + ] + }, "ListComments": { "methods": [ "list_comments" @@ -218,6 +243,11 @@ "create_comment" ] }, + "GetComment": { + "methods": [ + "get_comment" + ] + }, "ListComments": { "methods": [ "list_comments" diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/async_client.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/async_client.py index 79359a3862ca..2f38baa2c3dd 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/async_client.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/async_client.py @@ -44,8 +44,10 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore + from google.cloud.support_v2.services.case_attachment_service import pagers -from google.cloud.support_v2.types import attachment, attachment_service +from google.cloud.support_v2.types import actor, attachment, attachment_service from .client import CaseAttachmentServiceClient from .transports.base import DEFAULT_CLIENT_INFO, CaseAttachmentServiceTransport @@ -436,6 +438,153 @@ async def sample_list_attachments(): # Done; return the response. 
return response + async def get_attachment( + self, + request: Optional[Union[attachment_service.GetAttachmentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> attachment.Attachment: + r"""Retrieve an attachment associated with a support case. + + EXAMPLES: + + cURL: + + .. code:: shell + + attachment="projects/some-project/cases/23598314/attachments/0684M00000P3h1fQAB" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$attachment" + + Python: + + .. code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + request = ( + supportApiService.cases() + .attachments() + .get(name="projects/some-project/cases/43595344/attachments/0684M00000P3h1fQAB") + ) + print(request.execute()) + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import support_v2 + + async def sample_get_attachment(): + # Create a client + client = support_v2.CaseAttachmentServiceAsyncClient() + + # Initialize request argument(s) + request = support_v2.GetAttachmentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_attachment(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.support_v2.types.GetAttachmentRequest, dict]]): + The request object. Request for getting an attachment. + name (:class:`str`): + Required. The name of the attachment + to get. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.support_v2.types.Attachment: + An Attachment contains metadata about a file that was uploaded to a + case - it is NOT a file itself. That being said, the + name of an Attachment object can be used to download + its accompanying file through the media.download + endpoint. + + While attachments can be uploaded in the console at + the same time as a comment, they're associated on a + "case" level, not a "comment" level. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, attachment_service.GetAttachmentRequest): + request = attachment_service.GetAttachmentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_attachment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + async def __aenter__(self) -> "CaseAttachmentServiceAsyncClient": return self diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/client.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/client.py index 667cc7659792..462273cf98a2 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/client.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/client.py @@ -61,8 +61,10 @@ _LOGGER = std_logging.getLogger(__name__) +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore + from google.cloud.support_v2.services.case_attachment_service import pagers -from google.cloud.support_v2.types import attachment, attachment_service +from google.cloud.support_v2.types import actor, attachment, attachment_service from .transports.base import DEFAULT_CLIENT_INFO, CaseAttachmentServiceTransport from .transports.grpc import CaseAttachmentServiceGrpcTransport @@ -879,6 +881,150 @@ def sample_list_attachments(): # Done; return the response. return response + def get_attachment( + self, + request: Optional[Union[attachment_service.GetAttachmentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> attachment.Attachment: + r"""Retrieve an attachment associated with a support case. + + EXAMPLES: + + cURL: + + .. code:: shell + + attachment="projects/some-project/cases/23598314/attachments/0684M00000P3h1fQAB" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$attachment" + + Python: + + .. 
code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + request = ( + supportApiService.cases() + .attachments() + .get(name="projects/some-project/cases/43595344/attachments/0684M00000P3h1fQAB") + ) + print(request.execute()) + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import support_v2 + + def sample_get_attachment(): + # Create a client + client = support_v2.CaseAttachmentServiceClient() + + # Initialize request argument(s) + request = support_v2.GetAttachmentRequest( + name="name_value", + ) + + # Make the request + response = client.get_attachment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.support_v2.types.GetAttachmentRequest, dict]): + The request object. Request for getting an attachment. + name (str): + Required. The name of the attachment + to get. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.support_v2.types.Attachment: + An Attachment contains metadata about a file that was uploaded to a + case - it is NOT a file itself. That being said, the + name of an Attachment object can be used to download + its accompanying file through the media.download + endpoint. + + While attachments can be uploaded in the console at + the same time as a comment, they're associated on a + "case" level, not a "comment" level. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, attachment_service.GetAttachmentRequest): + request = attachment_service.GetAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_attachment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "CaseAttachmentServiceClient": return self diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/base.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/base.py index 611e0f33acc8..1b5b48e06ff4 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/base.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/base.py @@ -26,7 +26,7 @@ from google.oauth2 import service_account # type: ignore from google.cloud.support_v2 import gapic_version as package_version -from google.cloud.support_v2.types import attachment_service +from google.cloud.support_v2.types import attachment, attachment_service DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -156,6 +156,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.get_attachment: gapic_v1.method.wrap_method( + self.get_attachment, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -179,6 +184,15 @@ def list_attachments( ]: raise NotImplementedError() + @property + def get_attachment( + self, + ) -> Callable[ + [attachment_service.GetAttachmentRequest], + Union[attachment.Attachment, Awaitable[attachment.Attachment]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc.py index d44b204be334..640b4e57cc92 100644 --- 
a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc.py @@ -28,7 +28,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.protobuf.json_format import MessageToJson -from google.cloud.support_v2.types import attachment_service +from google.cloud.support_v2.types import attachment, attachment_service from .base import DEFAULT_CLIENT_INFO, CaseAttachmentServiceTransport @@ -356,6 +356,62 @@ def list_attachments( ) return self._stubs["list_attachments"] + @property + def get_attachment( + self, + ) -> Callable[[attachment_service.GetAttachmentRequest], attachment.Attachment]: + r"""Return a callable for the get attachment method over gRPC. + + Retrieve an attachment associated with a support case. + + EXAMPLES: + + cURL: + + .. code:: shell + + attachment="projects/some-project/cases/23598314/attachments/0684M00000P3h1fQAB" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$attachment" + + Python: + + .. code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + request = ( + supportApiService.cases() + .attachments() + .get(name="projects/some-project/cases/43595344/attachments/0684M00000P3h1fQAB") + ) + print(request.execute()) + + Returns: + Callable[[~.GetAttachmentRequest], + ~.Attachment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_attachment" not in self._stubs: + self._stubs["get_attachment"] = self._logged_channel.unary_unary( + "/google.cloud.support.v2.CaseAttachmentService/GetAttachment", + request_serializer=attachment_service.GetAttachmentRequest.serialize, + response_deserializer=attachment.Attachment.deserialize, + ) + return self._stubs["get_attachment"] + def close(self): self._logged_channel.close() diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc_asyncio.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc_asyncio.py index de1496eb7b41..ce188a453c17 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc_asyncio.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/grpc_asyncio.py @@ -31,7 +31,7 @@ from google.protobuf.json_format import MessageToJson from grpc.experimental import aio # type: ignore -from google.cloud.support_v2.types import attachment_service +from google.cloud.support_v2.types import attachment, attachment_service from .base import DEFAULT_CLIENT_INFO, CaseAttachmentServiceTransport from .grpc import CaseAttachmentServiceGrpcTransport @@ -364,6 +364,64 @@ def list_attachments( ) return self._stubs["list_attachments"] + @property + def get_attachment( + self, + ) -> Callable[ + [attachment_service.GetAttachmentRequest], Awaitable[attachment.Attachment] + ]: + r"""Return a callable for the get attachment method over gRPC. + + Retrieve an attachment associated with a support case. + + EXAMPLES: + + cURL: + + .. code:: shell + + attachment="projects/some-project/cases/23598314/attachments/0684M00000P3h1fQAB" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$attachment" + + Python: + + .. 
code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + request = ( + supportApiService.cases() + .attachments() + .get(name="projects/some-project/cases/43595344/attachments/0684M00000P3h1fQAB") + ) + print(request.execute()) + + Returns: + Callable[[~.GetAttachmentRequest], + Awaitable[~.Attachment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_attachment" not in self._stubs: + self._stubs["get_attachment"] = self._logged_channel.unary_unary( + "/google.cloud.support.v2.CaseAttachmentService/GetAttachment", + request_serializer=attachment_service.GetAttachmentRequest.serialize, + response_deserializer=attachment.Attachment.deserialize, + ) + return self._stubs["get_attachment"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -381,6 +439,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.get_attachment: self._wrap_method( + self.get_attachment, + default_timeout=None, + client_info=client_info, + ), } def _wrap_method(self, func, *args, **kwargs): diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest.py index a44105e3fb0b..e85df7f13954 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest.py 
+++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest.py @@ -28,7 +28,7 @@ from google.protobuf import json_format from requests import __version__ as requests_version -from google.cloud.support_v2.types import attachment_service +from google.cloud.support_v2.types import attachment, attachment_service from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO from .rest_base import _BaseCaseAttachmentServiceRestTransport @@ -72,6 +72,14 @@ class CaseAttachmentServiceRestInterceptor: .. code-block:: python class MyCustomCaseAttachmentServiceInterceptor(CaseAttachmentServiceRestInterceptor): + def pre_get_attachment(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_attachment(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_attachments(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -86,6 +94,54 @@ def post_list_attachments(self, response): """ + def pre_get_attachment( + self, + request: attachment_service.GetAttachmentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + attachment_service.GetAttachmentRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_attachment + + Override in a subclass to manipulate the request or metadata + before they are sent to the CaseAttachmentService server. + """ + return request, metadata + + def post_get_attachment( + self, response: attachment.Attachment + ) -> attachment.Attachment: + """Post-rpc interceptor for get_attachment + + DEPRECATED. Please use the `post_get_attachment_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the CaseAttachmentService server but before + it is returned to user code. 
This `post_get_attachment` interceptor runs + before the `post_get_attachment_with_metadata` interceptor. + """ + return response + + def post_get_attachment_with_metadata( + self, + response: attachment.Attachment, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[attachment.Attachment, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_attachment + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the CaseAttachmentService server but before it is returned to user code. + + We recommend only using this `post_get_attachment_with_metadata` + interceptor in new development instead of the `post_get_attachment` interceptor. + When both interceptors are used, this `post_get_attachment_with_metadata` interceptor runs after the + `post_get_attachment` interceptor. The (possibly modified) response returned by + `post_get_attachment` will be passed to + `post_get_attachment_with_metadata`. + """ + return response, metadata + def pre_list_attachments( self, request: attachment_service.ListAttachmentsRequest, @@ -233,6 +289,158 @@ def __init__( self._interceptor = interceptor or CaseAttachmentServiceRestInterceptor() self._prep_wrapped_messages(client_info) + class _GetAttachment( + _BaseCaseAttachmentServiceRestTransport._BaseGetAttachment, + CaseAttachmentServiceRestStub, + ): + def __hash__(self): + return hash("CaseAttachmentServiceRestTransport.GetAttachment") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + 
request: attachment_service.GetAttachmentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> attachment.Attachment: + r"""Call the get attachment method over HTTP. + + Args: + request (~.attachment_service.GetAttachmentRequest): + The request object. Request for getting an attachment. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.attachment.Attachment: + An Attachment contains metadata about a file that was + uploaded to a case - it is NOT a file itself. That being + said, the name of an Attachment object can be used to + download its accompanying file through the + ``media.download`` endpoint. + + While attachments can be uploaded in the console at the + same time as a comment, they're associated on a "case" + level, not a "comment" level. 
+ + """ + + http_options = _BaseCaseAttachmentServiceRestTransport._BaseGetAttachment._get_http_options() + + request, metadata = self._interceptor.pre_get_attachment(request, metadata) + transcoded_request = _BaseCaseAttachmentServiceRestTransport._BaseGetAttachment._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseCaseAttachmentServiceRestTransport._BaseGetAttachment._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.support_v2.CaseAttachmentServiceClient.GetAttachment", + extra={ + "serviceName": "google.cloud.support.v2.CaseAttachmentService", + "rpcName": "GetAttachment", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = CaseAttachmentServiceRestTransport._GetAttachment._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = attachment.Attachment() + pb_resp = attachment.Attachment.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_attachment(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_attachment_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = attachment.Attachment.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.support_v2.CaseAttachmentServiceClient.get_attachment", + extra={ + "serviceName": "google.cloud.support.v2.CaseAttachmentService", + "rpcName": "GetAttachment", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _ListAttachments( _BaseCaseAttachmentServiceRestTransport._BaseListAttachments, CaseAttachmentServiceRestStub, @@ -385,6 +593,14 @@ def __call__( ) return resp + @property + def get_attachment( + self, + ) -> Callable[[attachment_service.GetAttachmentRequest], attachment.Attachment]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetAttachment(self._session, self._host, self._interceptor) # type: ignore + @property def list_attachments( self, diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest_base.py b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest_base.py index aae71d041e26..18c10bb37168 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest_base.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/case_attachment_service/transports/rest_base.py @@ -20,7 +20,7 @@ from google.api_core import gapic_v1, path_template from google.protobuf import json_format -from google.cloud.support_v2.types import attachment_service +from google.cloud.support_v2.types import attachment, attachment_service from .base import DEFAULT_CLIENT_INFO, CaseAttachmentServiceTransport @@ -87,6 +87,53 @@ def __init__( api_audience=api_audience, ) + class _BaseGetAttachment: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=*/*/cases/*/attachments/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = attachment_service.GetAttachmentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + 
transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseCaseAttachmentServiceRestTransport._BaseGetAttachment._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseListAttachments: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/async_client.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/async_client.py index fa7a64c4edbc..72162cf4aa4f 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/async_client.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/async_client.py @@ -550,6 +550,151 @@ async def sample_create_comment(): # Done; return the response. return response + async def get_comment( + self, + request: Optional[Union[comment_service.GetCommentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> comment.Comment: + r"""Retrieve a comment. + + EXAMPLES: + + cURL: + + .. code:: shell + + comment="projects/some-project/cases/43595344/comments/234567890" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$comment" + + Python: + + .. 
code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + + request = supportApiService.cases().comments().get( + name="projects/some-project/cases/43595344/comments/234567890", + ) + print(request.execute()) + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import support_v2 + + async def sample_get_comment(): + # Create a client + client = support_v2.CommentServiceAsyncClient() + + # Initialize request argument(s) + request = support_v2.GetCommentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_comment(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.support_v2.types.GetCommentRequest, dict]]): + The request object. The request message for the + GetComment endpoint. + name (:class:`str`): + Required. The name of the comment to + retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.support_v2.types.Comment: + A comment associated with a support + case. + Case comments are the primary way for + Google Support to communicate with a + user who has opened a case. When a user + responds to Google Support, the user's + responses also appear as comments. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, comment_service.GetCommentRequest): + request = comment_service.GetCommentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_comment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + async def __aenter__(self) -> "CommentServiceAsyncClient": return self diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/client.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/client.py index 87518ab013d4..7ad0cea3aeb2 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/client.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/client.py @@ -993,6 +993,148 @@ def sample_create_comment(): # Done; return the response. return response + def get_comment( + self, + request: Optional[Union[comment_service.GetCommentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> comment.Comment: + r"""Retrieve a comment. + + EXAMPLES: + + cURL: + + .. code:: shell + + comment="projects/some-project/cases/43595344/comments/234567890" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$comment" + + Python: + + .. code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + + request = supportApiService.cases().comments().get( + name="projects/some-project/cases/43595344/comments/234567890", + ) + print(request.execute()) + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import support_v2 + + def sample_get_comment(): + # Create a client + client = support_v2.CommentServiceClient() + + # Initialize request argument(s) + request = support_v2.GetCommentRequest( + name="name_value", + ) + + # Make the request + response = client.get_comment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.support_v2.types.GetCommentRequest, dict]): + The request object. The request message for the + GetComment endpoint. + name (str): + Required. The name of the comment to + retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.support_v2.types.Comment: + A comment associated with a support + case. + Case comments are the primary way for + Google Support to communicate with a + user who has opened a case. When a user + responds to Google Support, the user's + responses also appear as comments. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, comment_service.GetCommentRequest): + request = comment_service.GetCommentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_comment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "CommentServiceClient": return self diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/base.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/base.py index b576052fcfd7..ec747f294bc9 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/base.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/base.py @@ -26,8 +26,8 @@ from google.oauth2 import service_account # type: ignore from google.cloud.support_v2 import gapic_version as package_version +from google.cloud.support_v2.types import comment, comment_service from google.cloud.support_v2.types import comment as gcs_comment -from google.cloud.support_v2.types import comment_service DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -162,6 +162,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.get_comment: gapic_v1.method.wrap_method( + self.get_comment, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -194,6 +199,15 @@ def create_comment( ]: raise NotImplementedError() + @property + def get_comment( + self, + ) -> Callable[ + [comment_service.GetCommentRequest], + Union[comment.Comment, Awaitable[comment.Comment]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc.py index 899519a60616..eb815f5839cc 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc.py +++ 
b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc.py @@ -28,8 +28,8 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.protobuf.json_format import MessageToJson +from google.cloud.support_v2.types import comment, comment_service from google.cloud.support_v2.types import comment as gcs_comment -from google.cloud.support_v2.types import comment_service from .base import DEFAULT_CLIENT_INFO, CommentServiceTransport @@ -382,6 +382,61 @@ def create_comment( ) return self._stubs["create_comment"] + @property + def get_comment( + self, + ) -> Callable[[comment_service.GetCommentRequest], comment.Comment]: + r"""Return a callable for the get comment method over gRPC. + + Retrieve a comment. + + EXAMPLES: + + cURL: + + .. code:: shell + + comment="projects/some-project/cases/43595344/comments/234567890" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$comment" + + Python: + + .. code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + + request = supportApiService.cases().comments().get( + name="projects/some-project/cases/43595344/comments/234567890", + ) + print(request.execute()) + + Returns: + Callable[[~.GetCommentRequest], + ~.Comment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_comment" not in self._stubs: + self._stubs["get_comment"] = self._logged_channel.unary_unary( + "/google.cloud.support.v2.CommentService/GetComment", + request_serializer=comment_service.GetCommentRequest.serialize, + response_deserializer=comment.Comment.deserialize, + ) + return self._stubs["get_comment"] + def close(self): self._logged_channel.close() diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc_asyncio.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc_asyncio.py index fd447b10f6b8..5c1b8b48bf42 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc_asyncio.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/grpc_asyncio.py @@ -31,8 +31,8 @@ from google.protobuf.json_format import MessageToJson from grpc.experimental import aio # type: ignore +from google.cloud.support_v2.types import comment, comment_service from google.cloud.support_v2.types import comment as gcs_comment -from google.cloud.support_v2.types import comment_service from .base import DEFAULT_CLIENT_INFO, CommentServiceTransport from .grpc import CommentServiceGrpcTransport @@ -393,6 +393,61 @@ def create_comment( ) return self._stubs["create_comment"] + @property + def get_comment( + self, + ) -> Callable[[comment_service.GetCommentRequest], Awaitable[comment.Comment]]: + r"""Return a callable for the get comment method over gRPC. + + Retrieve a comment. + + EXAMPLES: + + cURL: + + .. code:: shell + + comment="projects/some-project/cases/43595344/comments/234567890" + curl \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + "https://cloudsupport.googleapis.com/v2/$comment" + + Python: + + .. 
code:: python + + import googleapiclient.discovery + + api_version = "v2" + supportApiService = googleapiclient.discovery.build( + serviceName="cloudsupport", + version=api_version, + discoveryServiceUrl=f"https://cloudsupport.googleapis.com/$discovery/rest?version={api_version}", + ) + + request = supportApiService.cases().comments().get( + name="projects/some-project/cases/43595344/comments/234567890", + ) + print(request.execute()) + + Returns: + Callable[[~.GetCommentRequest], + Awaitable[~.Comment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_comment" not in self._stubs: + self._stubs["get_comment"] = self._logged_channel.unary_unary( + "/google.cloud.support.v2.CommentService/GetComment", + request_serializer=comment_service.GetCommentRequest.serialize, + response_deserializer=comment.Comment.deserialize, + ) + return self._stubs["get_comment"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -415,6 +470,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.get_comment: self._wrap_method( + self.get_comment, + default_timeout=None, + client_info=client_info, + ), } def _wrap_method(self, func, *args, **kwargs): diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest.py index 709a54b3285b..ca14990455af 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest.py +++ 
b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest.py @@ -28,8 +28,8 @@ from google.protobuf import json_format from requests import __version__ as requests_version +from google.cloud.support_v2.types import comment, comment_service from google.cloud.support_v2.types import comment as gcs_comment -from google.cloud.support_v2.types import comment_service from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO from .rest_base import _BaseCommentServiceRestTransport @@ -81,6 +81,14 @@ def post_create_comment(self, response): logging.log(f"Received response: {response}") return response + def pre_get_comment(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_comment(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_comments(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -141,6 +149,52 @@ def post_create_comment_with_metadata( """ return response, metadata + def pre_get_comment( + self, + request: comment_service.GetCommentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + comment_service.GetCommentRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_comment + + Override in a subclass to manipulate the request or metadata + before they are sent to the CommentService server. + """ + return request, metadata + + def post_get_comment(self, response: comment.Comment) -> comment.Comment: + """Post-rpc interceptor for get_comment + + DEPRECATED. Please use the `post_get_comment_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the CommentService server but before + it is returned to user code. This `post_get_comment` interceptor runs + before the `post_get_comment_with_metadata` interceptor. 
+ """ + return response + + def post_get_comment_with_metadata( + self, + response: comment.Comment, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[comment.Comment, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_comment + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the CommentService server but before it is returned to user code. + + We recommend only using this `post_get_comment_with_metadata` + interceptor in new development instead of the `post_get_comment` interceptor. + When both interceptors are used, this `post_get_comment_with_metadata` interceptor runs after the + `post_get_comment` interceptor. The (possibly modified) response returned by + `post_get_comment` will be passed to + `post_get_comment_with_metadata`. + """ + return response, metadata + def pre_list_comments( self, request: comment_service.ListCommentsRequest, @@ -443,6 +497,160 @@ def __call__( ) return resp + class _GetComment( + _BaseCommentServiceRestTransport._BaseGetComment, CommentServiceRestStub + ): + def __hash__(self): + return hash("CommentServiceRestTransport.GetComment") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: comment_service.GetCommentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> comment.Comment: + r"""Call the get comment method over HTTP. 
+ + Args: + request (~.comment_service.GetCommentRequest): + The request object. The request message for the + GetComment endpoint. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.comment.Comment: + A comment associated with a support + case. + Case comments are the primary way for + Google Support to communicate with a + user who has opened a case. When a user + responds to Google Support, the user's + responses also appear as comments. + + """ + + http_options = ( + _BaseCommentServiceRestTransport._BaseGetComment._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_comment(request, metadata) + transcoded_request = _BaseCommentServiceRestTransport._BaseGetComment._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = ( + _BaseCommentServiceRestTransport._BaseGetComment._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.support_v2.CommentServiceClient.GetComment", + extra={ + "serviceName": "google.cloud.support.v2.CommentService", + "rpcName": "GetComment", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + + # Send the request + response = CommentServiceRestTransport._GetComment._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = comment.Comment() + pb_resp = comment.Comment.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_comment(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_comment_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = comment.Comment.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.support_v2.CommentServiceClient.get_comment", + extra={ + "serviceName": "google.cloud.support.v2.CommentService", + "rpcName": "GetComment", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _ListComments( _BaseCommentServiceRestTransport._BaseListComments, CommentServiceRestStub ): @@ -600,6 +808,14 @@ def create_comment( # In C++ this would require a dynamic_cast return self._CreateComment(self._session, self._host, self._interceptor) # type: ignore + @property + def get_comment( + self, + ) -> Callable[[comment_service.GetCommentRequest], comment.Comment]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetComment(self._session, self._host, self._interceptor) # type: ignore + @property def list_comments( self, diff --git a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest_base.py b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest_base.py index a1b94a2ad008..df45b05f5a97 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest_base.py +++ b/packages/google-cloud-support/google/cloud/support_v2/services/comment_service/transports/rest_base.py @@ -20,8 +20,8 @@ from google.api_core import gapic_v1, path_template from google.protobuf import json_format +from google.cloud.support_v2.types import comment, comment_service from google.cloud.support_v2.types import comment as gcs_comment -from google.cloud.support_v2.types import comment_service from .base import DEFAULT_CLIENT_INFO, CommentServiceTransport @@ -150,6 +150,53 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseGetComment: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=*/*/cases/*/comments/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = comment_service.GetCommentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def 
_get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseCommentServiceRestTransport._BaseGetComment._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseListComments: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/packages/google-cloud-support/google/cloud/support_v2/types/__init__.py b/packages/google-cloud-support/google/cloud/support_v2/types/__init__.py index f9ae7fff08da..a3a0983bf28c 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/types/__init__.py +++ b/packages/google-cloud-support/google/cloud/support_v2/types/__init__.py @@ -20,6 +20,7 @@ Attachment, ) from .attachment_service import ( + GetAttachmentRequest, ListAttachmentsRequest, ListAttachmentsResponse, ) @@ -45,6 +46,7 @@ ) from .comment_service import ( CreateCommentRequest, + GetCommentRequest, ListCommentsRequest, ListCommentsResponse, ) @@ -55,6 +57,7 @@ __all__ = ( "Actor", "Attachment", + "GetAttachmentRequest", "ListAttachmentsRequest", "ListAttachmentsResponse", "Case", @@ -72,6 +75,7 @@ "UpdateCaseRequest", "Comment", "CreateCommentRequest", + "GetCommentRequest", "ListCommentsRequest", "ListCommentsResponse", "Escalation", diff --git a/packages/google-cloud-support/google/cloud/support_v2/types/attachment_service.py b/packages/google-cloud-support/google/cloud/support_v2/types/attachment_service.py index f49b00a4e624..d6cc727141d9 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/types/attachment_service.py +++ b/packages/google-cloud-support/google/cloud/support_v2/types/attachment_service.py @@ -25,6 +25,7 @@ package="google.cloud.support.v2", manifest={ "ListAttachmentsRequest", + "GetAttachmentRequest", "ListAttachmentsResponse", }, ) @@ -68,6 +69,20 @@ 
class ListAttachmentsRequest(proto.Message): ) +class GetAttachmentRequest(proto.Message): + r"""Request for getting an attachment. + + Attributes: + name (str): + Required. The name of the attachment to get. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + class ListAttachmentsResponse(proto.Message): r"""The response message for the ListAttachments endpoint. diff --git a/packages/google-cloud-support/google/cloud/support_v2/types/comment_service.py b/packages/google-cloud-support/google/cloud/support_v2/types/comment_service.py index ea071f9cd60d..eb5d71ce8361 100644 --- a/packages/google-cloud-support/google/cloud/support_v2/types/comment_service.py +++ b/packages/google-cloud-support/google/cloud/support_v2/types/comment_service.py @@ -27,6 +27,7 @@ "ListCommentsRequest", "ListCommentsResponse", "CreateCommentRequest", + "GetCommentRequest", }, ) @@ -112,4 +113,19 @@ class CreateCommentRequest(proto.Message): ) +class GetCommentRequest(proto.Message): + r"""The request message for the GetComment endpoint. + + Attributes: + name (str): + Required. The name of the comment to + retrieve. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_async.py b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_async.py new file mode 100644 index 000000000000..7b1c990fa084 --- /dev/null +++ b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAttachment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-support + + +# [START cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import support_v2 + + +async def sample_get_attachment(): + # Create a client + client = support_v2.CaseAttachmentServiceAsyncClient() + + # Initialize request argument(s) + request = support_v2.GetAttachmentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_attachment(request=request) + + # Handle the response + print(response) + + +# [END cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_async] diff --git a/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_sync.py b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_sync.py new file mode 100644 index 000000000000..1c5daeaad9ae --- /dev/null +++ b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_case_attachment_service_get_attachment_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAttachment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-support + + +# [START cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import support_v2 + + +def sample_get_attachment(): + # Create a client + client = support_v2.CaseAttachmentServiceClient() + + # Initialize request argument(s) + request = support_v2.GetAttachmentRequest( + name="name_value", + ) + + # Make the request + response = client.get_attachment(request=request) + + # Handle the response + print(response) + + +# [END cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_sync] diff --git a/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_async.py b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_async.py new file mode 100644 index 000000000000..607abae1d57f --- /dev/null +++ b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetComment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-support + + +# [START cloudsupport_v2_generated_CommentService_GetComment_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import support_v2 + + +async def sample_get_comment(): + # Create a client + client = support_v2.CommentServiceAsyncClient() + + # Initialize request argument(s) + request = support_v2.GetCommentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_comment(request=request) + + # Handle the response + print(response) + + +# [END cloudsupport_v2_generated_CommentService_GetComment_async] diff --git a/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_sync.py b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_sync.py new file mode 100644 index 000000000000..fc9c25098c37 --- /dev/null +++ b/packages/google-cloud-support/samples/generated_samples/cloudsupport_v2_generated_comment_service_get_comment_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetComment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-support + + +# [START cloudsupport_v2_generated_CommentService_GetComment_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import support_v2 + + +def sample_get_comment(): + # Create a client + client = support_v2.CommentServiceClient() + + # Initialize request argument(s) + request = support_v2.GetCommentRequest( + name="name_value", + ) + + # Make the request + response = client.get_comment(request=request) + + # Handle the response + print(response) + + +# [END cloudsupport_v2_generated_CommentService_GetComment_sync] diff --git a/packages/google-cloud-support/samples/generated_samples/snippet_metadata_google.cloud.support.v2.json b/packages/google-cloud-support/samples/generated_samples/snippet_metadata_google.cloud.support.v2.json index 5fdc5e4efb13..03d3311efded 100644 --- a/packages/google-cloud-support/samples/generated_samples/snippet_metadata_google.cloud.support.v2.json +++ b/packages/google-cloud-support/samples/generated_samples/snippet_metadata_google.cloud.support.v2.json @@ -11,6 +11,167 @@ "version": "0.4.0" }, "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.support_v2.CaseAttachmentServiceAsyncClient", + "shortName": "CaseAttachmentServiceAsyncClient" + }, + "fullName": "google.cloud.support_v2.CaseAttachmentServiceAsyncClient.get_attachment", + "method": { + "fullName": "google.cloud.support.v2.CaseAttachmentService.GetAttachment", + "service": { + "fullName": 
"google.cloud.support.v2.CaseAttachmentService", + "shortName": "CaseAttachmentService" + }, + "shortName": "GetAttachment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.support_v2.types.GetAttachmentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.support_v2.types.Attachment", + "shortName": "get_attachment" + }, + "description": "Sample for GetAttachment", + "file": "cloudsupport_v2_generated_case_attachment_service_get_attachment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "cloudsupport_v2_generated_case_attachment_service_get_attachment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.support_v2.CaseAttachmentServiceClient", + "shortName": "CaseAttachmentServiceClient" + }, + "fullName": "google.cloud.support_v2.CaseAttachmentServiceClient.get_attachment", + "method": { + "fullName": "google.cloud.support.v2.CaseAttachmentService.GetAttachment", + "service": { + "fullName": "google.cloud.support.v2.CaseAttachmentService", + "shortName": "CaseAttachmentService" + }, + "shortName": "GetAttachment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.support_v2.types.GetAttachmentRequest" + }, + { + "name": "name", + 
"type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.support_v2.types.Attachment", + "shortName": "get_attachment" + }, + "description": "Sample for GetAttachment", + "file": "cloudsupport_v2_generated_case_attachment_service_get_attachment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "cloudsupport_v2_generated_CaseAttachmentService_GetAttachment_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "cloudsupport_v2_generated_case_attachment_service_get_attachment_sync.py" + }, { "canonical": true, "clientMethod": { @@ -1613,6 +1774,167 @@ ], "title": "cloudsupport_v2_generated_comment_service_create_comment_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.support_v2.CommentServiceAsyncClient", + "shortName": "CommentServiceAsyncClient" + }, + "fullName": "google.cloud.support_v2.CommentServiceAsyncClient.get_comment", + "method": { + "fullName": "google.cloud.support.v2.CommentService.GetComment", + "service": { + "fullName": "google.cloud.support.v2.CommentService", + "shortName": "CommentService" + }, + "shortName": "GetComment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.support_v2.types.GetCommentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + 
"name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.support_v2.types.Comment", + "shortName": "get_comment" + }, + "description": "Sample for GetComment", + "file": "cloudsupport_v2_generated_comment_service_get_comment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "cloudsupport_v2_generated_CommentService_GetComment_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "cloudsupport_v2_generated_comment_service_get_comment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.support_v2.CommentServiceClient", + "shortName": "CommentServiceClient" + }, + "fullName": "google.cloud.support_v2.CommentServiceClient.get_comment", + "method": { + "fullName": "google.cloud.support.v2.CommentService.GetComment", + "service": { + "fullName": "google.cloud.support.v2.CommentService", + "shortName": "CommentService" + }, + "shortName": "GetComment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.support_v2.types.GetCommentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.support_v2.types.Comment", + "shortName": "get_comment" + }, + "description": "Sample for GetComment", + "file": "cloudsupport_v2_generated_comment_service_get_comment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"cloudsupport_v2_generated_CommentService_GetComment_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "cloudsupport_v2_generated_comment_service_get_comment_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/packages/google-cloud-support/tests/unit/gapic/support_v2/test_case_attachment_service.py b/packages/google-cloud-support/tests/unit/gapic/support_v2/test_case_attachment_service.py index 1e2a03c3eb39..ef9330c89b23 100644 --- a/packages/google-cloud-support/tests/unit/gapic/support_v2/test_case_attachment_service.py +++ b/packages/google-cloud-support/tests/unit/gapic/support_v2/test_case_attachment_service.py @@ -38,6 +38,7 @@ HAS_GOOGLE_AUTH_AIO = False import google.auth +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from google.api_core import ( client_options, gapic_v1, @@ -57,7 +58,7 @@ pagers, transports, ) -from google.cloud.support_v2.types import attachment, attachment_service +from google.cloud.support_v2.types import actor, attachment, attachment_service CRED_INFO_JSON = { "credential_source": "/path/to/file", @@ -1887,6 +1888,342 @@ async def test_list_attachments_async_pages(): assert page_.raw_page.next_page_token == token +@pytest.mark.parametrize( + "request_type", + [ + attachment_service.GetAttachmentRequest, + dict, + ], +) +def test_get_attachment(request_type, transport: str = "grpc"): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an 
empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = attachment.Attachment( + name="name_value", + filename="filename_value", + mime_type="mime_type_value", + size_bytes=1089, + ) + response = client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = attachment_service.GetAttachmentRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, attachment.Attachment) + assert response.name == "name_value" + assert response.filename == "filename_value" + assert response.mime_type == "mime_type_value" + assert response.size_bytes == 1089 + + +def test_get_attachment_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = attachment_service.GetAttachmentRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_attachment(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == attachment_service.GetAttachmentRequest( + name="name_value", + ) + + +def test_get_attachment_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_attachment in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_attachment] = mock_rpc + request = {} + client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_attachment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_attachment_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_attachment + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_attachment + ] = mock_rpc + + request = {} + await client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_attachment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_attachment_async( + transport: str = "grpc_asyncio", + request_type=attachment_service.GetAttachmentRequest, +): + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + attachment.Attachment( + name="name_value", + filename="filename_value", + mime_type="mime_type_value", + size_bytes=1089, + ) + ) + response = await client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = attachment_service.GetAttachmentRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, attachment.Attachment) + assert response.name == "name_value" + assert response.filename == "filename_value" + assert response.mime_type == "mime_type_value" + assert response.size_bytes == 1089 + + +@pytest.mark.asyncio +async def test_get_attachment_async_from_dict(): + await test_get_attachment_async(request_type=dict) + + +def test_get_attachment_field_headers(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = attachment_service.GetAttachmentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + call.return_value = attachment.Attachment() + client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_attachment_field_headers_async(): + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = attachment_service.GetAttachmentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + attachment.Attachment() + ) + await client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_attachment_flattened(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = attachment.Attachment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_attachment( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_attachment_flattened_error(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_attachment( + attachment_service.GetAttachmentRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_attachment_flattened_async(): + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = attachment.Attachment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + attachment.Attachment() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_attachment( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_attachment_flattened_error_async(): + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_attachment( + attachment_service.GetAttachmentRequest(), + name="name_value", + ) + + def test_list_attachments_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -1962,8 +2299,256 @@ def test_list_attachments_rest_required_fields( jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = attachment_service.ListAttachmentsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = attachment_service.ListAttachmentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_attachments(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_list_attachments_rest_unset_required_fields(): + transport = transports.CaseAttachmentServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_attachments._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_attachments_rest_flattened(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = attachment_service.ListAttachmentsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/cases/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = attachment_service.ListAttachmentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_attachments(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/cases/*}/attachments" % client.transport._host, + args[1], + ) + + +def test_list_attachments_rest_flattened_error(transport: str = "rest"): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_attachments( + attachment_service.ListAttachmentsRequest(), + parent="parent_value", + ) + + +def test_list_attachments_rest_pager(transport: str = "rest"): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + attachment_service.ListAttachmentsResponse( + attachments=[ + attachment.Attachment(), + attachment.Attachment(), + attachment.Attachment(), + ], + next_page_token="abc", + ), + attachment_service.ListAttachmentsResponse( + attachments=[], + next_page_token="def", + ), + attachment_service.ListAttachmentsResponse( + attachments=[ + attachment.Attachment(), + ], + next_page_token="ghi", + ), + attachment_service.ListAttachmentsResponse( + attachments=[ + attachment.Attachment(), + attachment.Attachment(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + attachment_service.ListAttachmentsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/cases/sample2"} + + pager = client.list_attachments(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, attachment.Attachment) for i in results) + + pages = list(client.list_attachments(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_attachment_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + 
wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_attachment in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_attachment] = mock_rpc + + request = {} + client.get_attachment(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_attachment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_attachment_rest_required_fields( + request_type=attachment_service.GetAttachmentRequest, +): + transport_class = transports.CaseAttachmentServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_attachment._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_attachment._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = CaseAttachmentServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1972,7 +2557,7 @@ def 
test_list_attachments_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = attachment_service.ListAttachmentsResponse() + return_value = attachment.Attachment() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -1993,38 +2578,30 @@ def test_list_attachments_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = attachment_service.ListAttachmentsResponse.pb(return_value) + return_value = attachment.Attachment.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_attachments(request) + response = client.get_attachment(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert sorted(expected_params) == sorted(actual_params) -def test_list_attachments_rest_unset_required_fields(): +def test_get_attachment_rest_unset_required_fields(): transport = transports.CaseAttachmentServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_attachments._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) + unset_fields = transport.get_attachment._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_list_attachments_rest_flattened(): +def test_get_attachment_rest_flattened(): client = CaseAttachmentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -2033,14 +2610,14 @@ def test_list_attachments_rest_flattened(): # Mock the http request 
call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = attachment_service.ListAttachmentsResponse() + return_value = attachment.Attachment() # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/cases/sample2"} + sample_request = {"name": "sample1/sample2/cases/sample3/attachments/sample4"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + name="name_value", ) mock_args.update(sample_request) @@ -2048,25 +2625,24 @@ def test_list_attachments_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = attachment_service.ListAttachmentsResponse.pb(return_value) + return_value = attachment.Attachment.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_attachments(**mock_args) + client.get_attachment(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/cases/*}/attachments" % client.transport._host, - args[1], + "%s/v2/{name=*/*/cases/*/attachments/*}" % client.transport._host, args[1] ) -def test_list_attachments_rest_flattened_error(transport: str = "rest"): +def test_get_attachment_rest_flattened_error(transport: str = "rest"): client = CaseAttachmentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -2075,73 +2651,10 @@ def test_list_attachments_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_attachments( - attachment_service.ListAttachmentsRequest(), - parent="parent_value", - ) - - -def test_list_attachments_rest_pager(transport: str = "rest"): - client = CaseAttachmentServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - attachment_service.ListAttachmentsResponse( - attachments=[ - attachment.Attachment(), - attachment.Attachment(), - attachment.Attachment(), - ], - next_page_token="abc", - ), - attachment_service.ListAttachmentsResponse( - attachments=[], - next_page_token="def", - ), - attachment_service.ListAttachmentsResponse( - attachments=[ - attachment.Attachment(), - ], - next_page_token="ghi", - ), - attachment_service.ListAttachmentsResponse( - attachments=[ - attachment.Attachment(), - attachment.Attachment(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - attachment_service.ListAttachmentsResponse.to_json(x) for x in response + client.get_attachment( + attachment_service.GetAttachmentRequest(), + name="name_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/cases/sample2"} - - pager = client.list_attachments(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, attachment.Attachment) for i in results) - - pages = list(client.list_attachments(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token def test_credentials_transport_error(): @@ -2271,6 +2784,27 @@ def test_list_attachments_empty_call_grpc(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_attachment_empty_call_grpc(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + call.return_value = attachment.Attachment() + client.get_attachment(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = attachment_service.GetAttachmentRequest() + + assert args[0] == request_msg + + def test_transport_kind_grpc_asyncio(): transport = CaseAttachmentServiceAsyncClient.get_transport_class("grpc_asyncio")( credentials=async_anonymous_credentials() @@ -2312,6 +2846,36 @@ async def test_list_attachments_empty_call_grpc_asyncio(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_attachment_empty_call_grpc_asyncio(): + client = CaseAttachmentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + attachment.Attachment( + name="name_value", + filename="filename_value", + mime_type="mime_type_value", + size_bytes=1089, + ) + ) + await client.get_attachment(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = attachment_service.GetAttachmentRequest() + + assert args[0] == request_msg + + def test_transport_kind_rest(): transport = CaseAttachmentServiceClient.get_transport_class("rest")( credentials=ga_credentials.AnonymousCredentials() @@ -2455,6 +3019,143 @@ def test_list_attachments_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() +def test_get_attachment_rest_bad_request( + request_type=attachment_service.GetAttachmentRequest, +): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "sample1/sample2/cases/sample3/attachments/sample4"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_attachment(request) + + +@pytest.mark.parametrize( + "request_type", + [ + attachment_service.GetAttachmentRequest, + dict, + ], +) +def test_get_attachment_rest_call_success(request_type): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "sample1/sample2/cases/sample3/attachments/sample4"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = attachment.Attachment( + name="name_value", + filename="filename_value", + mime_type="mime_type_value", + size_bytes=1089, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = attachment.Attachment.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_attachment(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, attachment.Attachment) + assert response.name == "name_value" + assert response.filename == "filename_value" + assert response.mime_type == "mime_type_value" + assert response.size_bytes == 1089 + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_attachment_rest_interceptors(null_interceptor): + transport = transports.CaseAttachmentServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.CaseAttachmentServiceRestInterceptor(), + ) + client = CaseAttachmentServiceClient(transport=transport) + + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object( + transports.CaseAttachmentServiceRestInterceptor, "post_get_attachment" + ) as post, + mock.patch.object( + transports.CaseAttachmentServiceRestInterceptor, + "post_get_attachment_with_metadata", + ) as post_with_metadata, + mock.patch.object( + transports.CaseAttachmentServiceRestInterceptor, "pre_get_attachment" + ) as pre, + ): + pre.assert_not_called() + 
post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = attachment_service.GetAttachmentRequest.pb( + attachment_service.GetAttachmentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = attachment.Attachment.to_json(attachment.Attachment()) + req.return_value.content = return_value + + request = attachment_service.GetAttachmentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = attachment.Attachment() + post_with_metadata.return_value = attachment.Attachment(), metadata + + client.get_attachment( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + def test_initialize_client_w_rest(): client = CaseAttachmentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -2482,6 +3183,26 @@ def test_list_attachments_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_attachment_empty_call_rest(): + client = CaseAttachmentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_attachment), "__call__") as call: + client.get_attachment(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = attachment_service.GetAttachmentRequest() + + assert args[0] == request_msg + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = CaseAttachmentServiceClient( @@ -2514,7 +3235,10 @@ def test_case_attachment_service_base_transport(): # Every method on the transport should just blindly # raise NotImplementedError. - methods = ("list_attachments",) + methods = ( + "list_attachments", + "get_attachment", + ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) @@ -2786,6 +3510,9 @@ def test_case_attachment_service_client_transport_session_collision(transport_na session1 = client1.transport.list_attachments._session session2 = client2.transport.list_attachments._session assert session1 != session2 + session1 = client1.transport.get_attachment._session + session2 = client2.transport.get_attachment._session + assert session1 != session2 def test_case_attachment_service_grpc_transport_channel(): diff --git a/packages/google-cloud-support/tests/unit/gapic/support_v2/test_comment_service.py b/packages/google-cloud-support/tests/unit/gapic/support_v2/test_comment_service.py index f936a2a37680..a4117e70d405 100644 --- a/packages/google-cloud-support/tests/unit/gapic/support_v2/test_comment_service.py +++ b/packages/google-cloud-support/tests/unit/gapic/support_v2/test_comment_service.py @@ -2161,6 +2161,333 @@ async def test_create_comment_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + comment_service.GetCommentRequest, + dict, + ], +) +def test_get_comment(request_type, transport: str = "grpc"): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = comment.Comment( + name="name_value", + body="body_value", + plain_text_body="plain_text_body_value", + ) + response = client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = comment_service.GetCommentRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, comment.Comment) + assert response.name == "name_value" + assert response.body == "body_value" + assert response.plain_text_body == "plain_text_body_value" + + +def test_get_comment_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = comment_service.GetCommentRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_comment(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == comment_service.GetCommentRequest( + name="name_value", + ) + + +def test_get_comment_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_comment in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_comment] = mock_rpc + request = {} + client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_comment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_comment_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_comment + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_comment + ] = mock_rpc + + request = {} + await client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_comment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_comment_async( + transport: str = "grpc_asyncio", request_type=comment_service.GetCommentRequest +): + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + comment.Comment( + name="name_value", + body="body_value", + plain_text_body="plain_text_body_value", + ) + ) + response = await client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = comment_service.GetCommentRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, comment.Comment) + assert response.name == "name_value" + assert response.body == "body_value" + assert response.plain_text_body == "plain_text_body_value" + + +@pytest.mark.asyncio +async def test_get_comment_async_from_dict(): + await test_get_comment_async(request_type=dict) + + +def test_get_comment_field_headers(): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = comment_service.GetCommentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + call.return_value = comment.Comment() + client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_comment_field_headers_async(): + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = comment_service.GetCommentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(comment.Comment()) + await client.get_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_comment_flattened(): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = comment.Comment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_comment( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_comment_flattened_error(): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_comment( + comment_service.GetCommentRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_comment_flattened_async(): + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = comment.Comment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(comment.Comment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_comment( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_comment_flattened_error_async(): + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_comment( + comment_service.GetCommentRequest(), + name="name_value", + ) + + def test_list_comments_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -2393,30 +2720,218 @@ def test_list_comments_rest_pager(transport: str = "rest"): # Two responses for two calls response = response + response - # Wrap the values into proper Response objs - response = tuple( - comment_service.ListCommentsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Wrap the values into proper Response objs + response = tuple( + comment_service.ListCommentsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/cases/sample2"} + + pager = client.list_comments(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, comment.Comment) for i in results) + + pages = list(client.list_comments(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_create_comment_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should 
wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_comment in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_comment] = mock_rpc + + request = {} + client.create_comment(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_comment(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_comment_rest_required_fields( + request_type=comment_service.CreateCommentRequest, +): + transport_class = transports.CommentServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_comment._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_comment._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = CommentServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gcs_comment.Comment() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gcs_comment.Comment.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_comment(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert sorted(expected_params) == sorted(actual_params) + + +def test_create_comment_rest_unset_required_fields(): + transport = transports.CommentServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_comment._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "comment", + ) + ) + ) + + +def test_create_comment_rest_flattened(): + client = CommentServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gcs_comment.Comment() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/cases/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + comment=gcs_comment.Comment(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gcs_comment.Comment.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_comment(**mock_args) - sample_request = {"parent": "projects/sample1/cases/sample2"} + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/cases/*}/comments" % client.transport._host, + args[1], + ) - pager = client.list_comments(request=sample_request) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, comment.Comment) for i in results) +def test_create_comment_rest_flattened_error(transport: str = "rest"): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - pages = list(client.list_comments(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_comment( + comment_service.CreateCommentRequest(), + parent="parent_value", + comment=gcs_comment.Comment(name="name_value"), + ) -def test_create_comment_rest_use_cached_wrapped_rpc(): +def test_get_comment_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -2430,35 +2945,35 @@ def test_create_comment_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.create_comment in client._transport._wrapped_methods + assert client._transport.get_comment in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.create_comment] = mock_rpc + client._transport._wrapped_methods[client._transport.get_comment] = mock_rpc request = {} - client.create_comment(request) + client.get_comment(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.create_comment(request) + client.get_comment(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_comment_rest_required_fields( - request_type=comment_service.CreateCommentRequest, +def test_get_comment_rest_required_fields( + request_type=comment_service.GetCommentRequest, ): transport_class = transports.CommentServiceRestTransport request_init = {} - request_init["parent"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -2469,21 +2984,21 @@ def test_create_comment_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_comment._get_unset_required_fields(jsonified_request) + ).get_comment._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_comment._get_unset_required_fields(jsonified_request) + ).get_comment._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = CommentServiceClient( 
credentials=ga_credentials.AnonymousCredentials(), @@ -2492,7 +3007,7 @@ def test_create_comment_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = gcs_comment.Comment() + return_value = comment.Comment() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -2504,48 +3019,39 @@ def test_create_comment_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = gcs_comment.Comment.pb(return_value) + return_value = comment.Comment.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_comment(request) + response = client.get_comment(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert sorted(expected_params) == sorted(actual_params) -def test_create_comment_rest_unset_required_fields(): +def test_get_comment_rest_unset_required_fields(): transport = transports.CommentServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_comment._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "comment", - ) - ) - ) + unset_fields = transport.get_comment._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def 
test_create_comment_rest_flattened(): +def test_get_comment_rest_flattened(): client = CommentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -2554,15 +3060,14 @@ def test_create_comment_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = gcs_comment.Comment() + return_value = comment.Comment() # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/cases/sample2"} + sample_request = {"name": "sample1/sample2/cases/sample3/comments/sample4"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - comment=gcs_comment.Comment(name="name_value"), + name="name_value", ) mock_args.update(sample_request) @@ -2570,25 +3075,24 @@ def test_create_comment_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = gcs_comment.Comment.pb(return_value) + return_value = comment.Comment.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_comment(**mock_args) + client.get_comment(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/cases/*}/comments" % client.transport._host, - args[1], + "%s/v2/{name=*/*/cases/*/comments/*}" % client.transport._host, args[1] ) -def test_create_comment_rest_flattened_error(transport: str = "rest"): +def test_get_comment_rest_flattened_error(transport: str = "rest"): client = CommentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -2597,10 +3101,9 @@ def test_create_comment_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_comment( - comment_service.CreateCommentRequest(), - parent="parent_value", - comment=gcs_comment.Comment(name="name_value"), + client.get_comment( + comment_service.GetCommentRequest(), + name="name_value", ) @@ -2752,6 +3255,27 @@ def test_create_comment_empty_call_grpc(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_comment_empty_call_grpc(): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + call.return_value = comment.Comment() + client.get_comment(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = comment_service.GetCommentRequest() + + assert args[0] == request_msg + + def test_transport_kind_grpc_asyncio(): transport = CommentServiceAsyncClient.get_transport_class("grpc_asyncio")( credentials=async_anonymous_credentials() @@ -2822,6 +3346,35 @@ async def test_create_comment_empty_call_grpc_asyncio(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_comment_empty_call_grpc_asyncio(): + client = CommentServiceAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + comment.Comment( + name="name_value", + body="body_value", + plain_text_body="plain_text_body_value", + ) + ) + await client.get_comment(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = comment_service.GetCommentRequest() + + assert args[0] == request_msg + + def test_transport_kind_rest(): transport = CommentServiceClient.get_transport_class("rest")( credentials=ga_credentials.AnonymousCredentials() @@ -3178,6 +3731,138 @@ def test_create_comment_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() +def test_get_comment_rest_bad_request(request_type=comment_service.GetCommentRequest): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "sample1/sample2/cases/sample3/comments/sample4"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with ( + mock.patch.object(Session, "request") as req, + pytest.raises(core_exceptions.BadRequest), + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_comment(request) + + +@pytest.mark.parametrize( + "request_type", + [ + comment_service.GetCommentRequest, + dict, + ], +) +def test_get_comment_rest_call_success(request_type): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "sample1/sample2/cases/sample3/comments/sample4"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = comment.Comment( + name="name_value", + body="body_value", + plain_text_body="plain_text_body_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = comment.Comment.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_comment(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, comment.Comment) + assert response.name == "name_value" + assert response.body == "body_value" + assert response.plain_text_body == "plain_text_body_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_comment_rest_interceptors(null_interceptor): + transport = transports.CommentServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.CommentServiceRestInterceptor(), + ) + client = CommentServiceClient(transport=transport) + + with ( + mock.patch.object(type(client.transport._session), "request") as req, + mock.patch.object(path_template, "transcode") as transcode, + mock.patch.object( + transports.CommentServiceRestInterceptor, "post_get_comment" + ) as post, + mock.patch.object( + transports.CommentServiceRestInterceptor, "post_get_comment_with_metadata" + ) as post_with_metadata, + mock.patch.object( + transports.CommentServiceRestInterceptor, "pre_get_comment" + ) as pre, + ): + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = comment_service.GetCommentRequest.pb( + comment_service.GetCommentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": 
pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = comment.Comment.to_json(comment.Comment()) + req.return_value.content = return_value + + request = comment_service.GetCommentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = comment.Comment() + post_with_metadata.return_value = comment.Comment(), metadata + + client.get_comment( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + def test_initialize_client_w_rest(): client = CommentServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -3225,6 +3910,26 @@ def test_create_comment_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_comment_empty_call_rest(): + client = CommentServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_comment), "__call__") as call: + client.get_comment(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = comment_service.GetCommentRequest() + + assert args[0] == request_msg + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = CommentServiceClient( @@ -3260,6 +3965,7 @@ def test_comment_service_base_transport(): methods = ( "list_comments", "create_comment", + "get_comment", ) for method in methods: with pytest.raises(NotImplementedError): @@ -3531,6 +4237,9 @@ def test_comment_service_client_transport_session_collision(transport_name): session1 = client1.transport.create_comment._session session2 = client2.transport.create_comment._session assert session1 != session2 + session1 = client1.transport.get_comment._session + session2 = client2.transport.get_comment._session + assert session1 != session2 def test_comment_service_grpc_transport_channel():