gcp.container.NodePool
Explore with Pulumi AI
Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information see the official documentation and the API reference.
Example Usage
Using A Separately Managed Node Pool (Recommended)
// Example: a cluster with its default node pool removed, plus a separately
// managed preemptible node pool that runs under a dedicated service account.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Dedicated service account attached to the node pool's nodes.
const _default = new gcp.serviceaccount.Account("default", {
    accountId: "service-account-id",
    displayName: "Service Account",
});
// Cluster control plane; the default node pool is removed so the pool below
// is managed independently of the cluster resource.
const primary = new gcp.container.Cluster("primary", {
    name: "my-gke-cluster",
    location: "us-central1",
    removeDefaultNodePool: true,
    initialNodeCount: 1,
});
// Separately managed node pool of one preemptible e2-medium node.
const primaryPreemptibleNodes = new gcp.container.NodePool("primary_preemptible_nodes", {
    name: "my-node-pool",
    cluster: primary.id,
    nodeCount: 1,
    nodeConfig: {
        preemptible: true,
        machineType: "e2-medium",
        serviceAccount: _default.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
});
# Example: a cluster with its default node pool removed, plus a separately
# managed preemptible node pool that runs under a dedicated service account.
import pulumi
import pulumi_gcp as gcp
# Dedicated service account attached to the node pool's nodes.
default = gcp.serviceaccount.Account("default",
    account_id="service-account-id",
    display_name="Service Account")
# Cluster control plane; the default node pool is removed so the pool below
# is managed independently of the cluster resource.
primary = gcp.container.Cluster("primary",
    name="my-gke-cluster",
    location="us-central1",
    remove_default_node_pool=True,
    initial_node_count=1)
# Separately managed node pool of one preemptible e2-medium node.
primary_preemptible_nodes = gcp.container.NodePool("primary_preemptible_nodes",
    name="my-node-pool",
    cluster=primary.id,
    node_count=1,
    node_config={
        "preemptible": True,
        "machine_type": "e2-medium",
        "service_account": default.email,
        "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
    })
// Example: a cluster with its default node pool removed, plus a separately
// managed preemptible node pool that runs under a dedicated service account.
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Dedicated service account attached to the node pool's nodes.
		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
			AccountId:   pulumi.String("service-account-id"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		// Cluster control plane; the default node pool is removed so the pool
		// below is managed independently of the cluster resource.
		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
			Name:                  pulumi.String("my-gke-cluster"),
			Location:              pulumi.String("us-central1"),
			RemoveDefaultNodePool: pulumi.Bool(true),
			InitialNodeCount:      pulumi.Int(1),
		})
		if err != nil {
			return err
		}
		// Separately managed node pool of one preemptible e2-medium node.
		_, err = container.NewNodePool(ctx, "primary_preemptible_nodes", &container.NodePoolArgs{
			Name:      pulumi.String("my-node-pool"),
			Cluster:   primary.ID(),
			NodeCount: pulumi.Int(1),
			NodeConfig: &container.NodePoolNodeConfigArgs{
				Preemptible:    pulumi.Bool(true),
				MachineType:    pulumi.String("e2-medium"),
				ServiceAccount: _default.Email,
				OauthScopes: pulumi.StringArray{
					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: a cluster with its default node pool removed, plus a separately
// managed preemptible node pool that runs under a dedicated service account.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    // Dedicated service account attached to the node pool's nodes.
    var @default = new Gcp.ServiceAccount.Account("default", new()
    {
        AccountId = "service-account-id",
        DisplayName = "Service Account",
    });
    // Cluster control plane; the default node pool is removed so the pool
    // below is managed independently of the cluster resource.
    var primary = new Gcp.Container.Cluster("primary", new()
    {
        Name = "my-gke-cluster",
        Location = "us-central1",
        RemoveDefaultNodePool = true,
        InitialNodeCount = 1,
    });
    // Separately managed node pool of one preemptible e2-medium node.
    var primaryPreemptibleNodes = new Gcp.Container.NodePool("primary_preemptible_nodes", new()
    {
        Name = "my-node-pool",
        Cluster = primary.Id,
        NodeCount = 1,
        NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
        {
            Preemptible = true,
            MachineType = "e2-medium",
            ServiceAccount = @default.Email,
            OauthScopes = new[]
            {
                "https://www.googleapis.com/auth/cloud-platform",
            },
        },
    });
});
// Example: a cluster with its default node pool removed, plus a separately
// managed preemptible node pool that runs under a dedicated service account.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Dedicated service account attached to the node pool's nodes.
        var default_ = new Account("default", AccountArgs.builder()
            .accountId("service-account-id")
            .displayName("Service Account")
            .build());
        // Cluster control plane; the default node pool is removed so the pool
        // below is managed independently of the cluster resource.
        var primary = new Cluster("primary", ClusterArgs.builder()
            .name("my-gke-cluster")
            .location("us-central1")
            .removeDefaultNodePool(true)
            .initialNodeCount(1)
            .build());
        // Separately managed node pool of one preemptible e2-medium node.
        var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()
            .name("my-node-pool")
            .cluster(primary.id())
            .nodeCount(1)
            .nodeConfig(NodePoolNodeConfigArgs.builder()
                .preemptible(true)
                .machineType("e2-medium")
                .serviceAccount(default_.email())
                .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                .build())
            .build());
    }
}
# Example: a cluster with its default node pool removed, plus a separately
# managed preemptible node pool that runs under a dedicated service account.
resources:
  # Dedicated service account attached to the node pool's nodes.
  default:
    type: gcp:serviceaccount:Account
    properties:
      accountId: service-account-id
      displayName: Service Account
  # Cluster control plane; the default node pool is removed so the pool
  # below is managed independently of the cluster resource.
  primary:
    type: gcp:container:Cluster
    properties:
      name: my-gke-cluster
      location: us-central1
      removeDefaultNodePool: true
      initialNodeCount: 1
  # Separately managed node pool of one preemptible e2-medium node.
  primaryPreemptibleNodes:
    type: gcp:container:NodePool
    name: primary_preemptible_nodes
    properties:
      name: my-node-pool
      cluster: ${primary.id}
      nodeCount: 1
      nodeConfig:
        preemptible: true
        machineType: e2-medium
        serviceAccount: ${default.email}
        oauthScopes:
          - https://www.googleapis.com/auth/cloud-platform
2 Node Pools, 1 Separately Managed + The Default Node Pool
// Example: a cluster that keeps its default node pool (with a GPU accelerator)
// plus one separately managed node pool.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Service account used by both the default and the separately managed pools.
const _default = new gcp.serviceaccount.Account("default", {
    accountId: "service-account-id",
    displayName: "Service Account",
});
// Zonal cluster whose default node pool has 3 nodes and an attached
// nvidia-tesla-k80 accelerator per node.
const primary = new gcp.container.Cluster("primary", {
    name: "marcellus-wallace",
    location: "us-central1-a",
    initialNodeCount: 3,
    nodeLocations: ["us-central1-c"],
    nodeConfig: {
        serviceAccount: _default.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        guestAccelerators: [{
            type: "nvidia-tesla-k80",
            count: 1,
        }],
    },
});
// Additional node pool managed separately from the cluster resource.
const np = new gcp.container.NodePool("np", {
    name: "my-node-pool",
    cluster: primary.id,
    nodeConfig: {
        machineType: "e2-medium",
        serviceAccount: _default.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
});
# Example: a cluster that keeps its default node pool (with a GPU accelerator)
# plus one separately managed node pool.
import pulumi
import pulumi_gcp as gcp
# Service account used by both the default and the separately managed pools.
default = gcp.serviceaccount.Account("default",
    account_id="service-account-id",
    display_name="Service Account")
# Zonal cluster whose default node pool has 3 nodes and an attached
# nvidia-tesla-k80 accelerator per node.
primary = gcp.container.Cluster("primary",
    name="marcellus-wallace",
    location="us-central1-a",
    initial_node_count=3,
    node_locations=["us-central1-c"],
    node_config={
        "service_account": default.email,
        "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
        "guest_accelerators": [{
            "type": "nvidia-tesla-k80",
            "count": 1,
        }],
    })
# Additional node pool managed separately from the cluster resource.
np = gcp.container.NodePool("np",
    name="my-node-pool",
    cluster=primary.id,
    node_config={
        "machine_type": "e2-medium",
        "service_account": default.email,
        "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
    })
// Example: a cluster that keeps its default node pool (with a GPU accelerator)
// plus one separately managed node pool.
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Service account used by both the default and the separately managed pools.
		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
			AccountId:   pulumi.String("service-account-id"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		// Zonal cluster whose default node pool has 3 nodes and an attached
		// nvidia-tesla-k80 accelerator per node.
		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
			Name:             pulumi.String("marcellus-wallace"),
			Location:         pulumi.String("us-central1-a"),
			InitialNodeCount: pulumi.Int(3),
			NodeLocations: pulumi.StringArray{
				pulumi.String("us-central1-c"),
			},
			NodeConfig: &container.ClusterNodeConfigArgs{
				ServiceAccount: _default.Email,
				OauthScopes: pulumi.StringArray{
					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
				},
				GuestAccelerators: container.ClusterNodeConfigGuestAcceleratorArray{
					&container.ClusterNodeConfigGuestAcceleratorArgs{
						Type:  pulumi.String("nvidia-tesla-k80"),
						Count: pulumi.Int(1),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Additional node pool managed separately from the cluster resource.
		_, err = container.NewNodePool(ctx, "np", &container.NodePoolArgs{
			Name:    pulumi.String("my-node-pool"),
			Cluster: primary.ID(),
			NodeConfig: &container.NodePoolNodeConfigArgs{
				MachineType:    pulumi.String("e2-medium"),
				ServiceAccount: _default.Email,
				OauthScopes: pulumi.StringArray{
					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: a cluster that keeps its default node pool (with a GPU accelerator)
// plus one separately managed node pool.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    // Service account used by both the default and the separately managed pools.
    var @default = new Gcp.ServiceAccount.Account("default", new()
    {
        AccountId = "service-account-id",
        DisplayName = "Service Account",
    });
    // Zonal cluster whose default node pool has 3 nodes and an attached
    // nvidia-tesla-k80 accelerator per node.
    var primary = new Gcp.Container.Cluster("primary", new()
    {
        Name = "marcellus-wallace",
        Location = "us-central1-a",
        InitialNodeCount = 3,
        NodeLocations = new[]
        {
            "us-central1-c",
        },
        NodeConfig = new Gcp.Container.Inputs.ClusterNodeConfigArgs
        {
            ServiceAccount = @default.Email,
            OauthScopes = new[]
            {
                "https://www.googleapis.com/auth/cloud-platform",
            },
            GuestAccelerators = new[]
            {
                new Gcp.Container.Inputs.ClusterNodeConfigGuestAcceleratorArgs
                {
                    Type = "nvidia-tesla-k80",
                    Count = 1,
                },
            },
        },
    });
    // Additional node pool managed separately from the cluster resource.
    var np = new Gcp.Container.NodePool("np", new()
    {
        Name = "my-node-pool",
        Cluster = primary.Id,
        NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
        {
            MachineType = "e2-medium",
            ServiceAccount = @default.Email,
            OauthScopes = new[]
            {
                "https://www.googleapis.com/auth/cloud-platform",
            },
        },
    });
});
// Example: a cluster that keeps its default node pool (with a GPU accelerator)
// plus one separately managed node pool.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
// Fix: this import was missing, but the class is used in the cluster's
// guestAccelerators builder below — without it the example does not compile.
import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Service account used by both the default and the separately managed pools.
        var default_ = new Account("default", AccountArgs.builder()
            .accountId("service-account-id")
            .displayName("Service Account")
            .build());
        // Zonal cluster whose default node pool has 3 nodes and an attached
        // nvidia-tesla-k80 accelerator per node.
        var primary = new Cluster("primary", ClusterArgs.builder()
            .name("marcellus-wallace")
            .location("us-central1-a")
            .initialNodeCount(3)
            .nodeLocations("us-central1-c")
            .nodeConfig(ClusterNodeConfigArgs.builder()
                .serviceAccount(default_.email())
                .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                .guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
                    .type("nvidia-tesla-k80")
                    .count(1)
                    .build())
                .build())
            .build());
        // Additional node pool managed separately from the cluster resource.
        var np = new NodePool("np", NodePoolArgs.builder()
            .name("my-node-pool")
            .cluster(primary.id())
            .nodeConfig(NodePoolNodeConfigArgs.builder()
                .machineType("e2-medium")
                .serviceAccount(default_.email())
                .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                .build())
            .build());
    }
}
# Example: a cluster that keeps its default node pool (with a GPU accelerator)
# plus one separately managed node pool.
resources:
  # Service account used by both the default and the separately managed pools.
  default:
    type: gcp:serviceaccount:Account
    properties:
      accountId: service-account-id
      displayName: Service Account
  # Additional node pool managed separately from the cluster resource.
  np:
    type: gcp:container:NodePool
    properties:
      name: my-node-pool
      cluster: ${primary.id}
      nodeConfig:
        machineType: e2-medium
        serviceAccount: ${default.email}
        oauthScopes:
          - https://www.googleapis.com/auth/cloud-platform
  # Zonal cluster whose default node pool has 3 nodes and an attached
  # nvidia-tesla-k80 accelerator per node.
  primary:
    type: gcp:container:Cluster
    properties:
      name: marcellus-wallace
      location: us-central1-a
      initialNodeCount: 3
      nodeLocations:
        - us-central1-c
      nodeConfig:
        serviceAccount: ${default.email}
        oauthScopes:
          - https://www.googleapis.com/auth/cloud-platform
        guestAccelerators:
          - type: nvidia-tesla-k80
            count: 1
Create NodePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
@overload
def NodePool(resource_name: str,
             args: NodePoolArgs,
             opts: Optional[ResourceOptions] = None)
@overload
def NodePool(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             cluster: Optional[str] = None,
             network_config: Optional[NodePoolNetworkConfigArgs] = None,
             name_prefix: Optional[str] = None,
             location: Optional[str] = None,
             management: Optional[NodePoolManagementArgs] = None,
             node_config: Optional[NodePoolNodeConfigArgs] = None,
             name: Optional[str] = None,
             initial_node_count: Optional[int] = None,
             autoscaling: Optional[NodePoolAutoscalingArgs] = None,
             max_pods_per_node: Optional[int] = None,
             node_count: Optional[int] = None,
             node_locations: Optional[Sequence[str]] = None,
             placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
             project: Optional[str] = None,
             queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
             upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
             version: Optional[str] = None)
func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
public NodePool(String name, NodePoolArgs args)
public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
type: gcp:container:NodePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: constructs a NodePool passing a placeholder value for
// every supported input property. The values shown ("string", 0, false) are
// type placeholders, not meaningful settings.
var nodePoolResource = new Gcp.Container.NodePool("nodePoolResource", new()
{
    Cluster = "string",
    NetworkConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigArgs
    {
        AdditionalNodeNetworkConfigs = new[]
        {
            new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
            {
                Network = "string",
                Subnetwork = "string",
            },
        },
        AdditionalPodNetworkConfigs = new[]
        {
            new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
            {
                MaxPodsPerNode = 0,
                SecondaryPodRange = "string",
                Subnetwork = "string",
            },
        },
        CreatePodRange = false,
        EnablePrivateNodes = false,
        NetworkPerformanceConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigNetworkPerformanceConfigArgs
        {
            TotalEgressBandwidthTier = "string",
        },
        PodCidrOverprovisionConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
        {
            Disabled = false,
        },
        PodIpv4CidrBlock = "string",
        PodRange = "string",
    },
    NamePrefix = "string",
    Location = "string",
    Management = new Gcp.Container.Inputs.NodePoolManagementArgs
    {
        AutoRepair = false,
        AutoUpgrade = false,
    },
    // Per-node configuration for the pool's nodes.
    NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
    {
        AdvancedMachineFeatures = new Gcp.Container.Inputs.NodePoolNodeConfigAdvancedMachineFeaturesArgs
        {
            ThreadsPerCore = 0,
            EnableNestedVirtualization = false,
        },
        BootDiskKmsKey = "string",
        ConfidentialNodes = new Gcp.Container.Inputs.NodePoolNodeConfigConfidentialNodesArgs
        {
            Enabled = false,
        },
        ContainerdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigArgs
        {
            PrivateRegistryAccessConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
            {
                Enabled = false,
                CertificateAuthorityDomainConfigs = new[]
                {
                    new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
                    {
                        Fqdns = new[]
                        {
                            "string",
                        },
                        GcpSecretManagerCertificateConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
                        {
                            SecretUri = "string",
                        },
                    },
                },
            },
        },
        DiskSizeGb = 0,
        DiskType = "string",
        EffectiveTaints = new[]
        {
            new Gcp.Container.Inputs.NodePoolNodeConfigEffectiveTaintArgs
            {
                Effect = "string",
                Key = "string",
                Value = "string",
            },
        },
        EnableConfidentialStorage = false,
        EphemeralStorageConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageConfigArgs
        {
            LocalSsdCount = 0,
        },
        EphemeralStorageLocalSsdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
        {
            LocalSsdCount = 0,
        },
        FastSocket = new Gcp.Container.Inputs.NodePoolNodeConfigFastSocketArgs
        {
            Enabled = false,
        },
        GcfsConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGcfsConfigArgs
        {
            Enabled = false,
        },
        GuestAccelerators = new[]
        {
            new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorArgs
            {
                Count = 0,
                Type = "string",
                GpuDriverInstallationConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
                {
                    GpuDriverVersion = "string",
                },
                GpuPartitionSize = "string",
                GpuSharingConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
                {
                    GpuSharingStrategy = "string",
                    MaxSharedClientsPerGpu = 0,
                },
            },
        },
        Gvnic = new Gcp.Container.Inputs.NodePoolNodeConfigGvnicArgs
        {
            Enabled = false,
        },
        HostMaintenancePolicy = new Gcp.Container.Inputs.NodePoolNodeConfigHostMaintenancePolicyArgs
        {
            MaintenanceInterval = "string",
        },
        ImageType = "string",
        KubeletConfig = new Gcp.Container.Inputs.NodePoolNodeConfigKubeletConfigArgs
        {
            AllowedUnsafeSysctls = new[]
            {
                "string",
            },
            ContainerLogMaxFiles = 0,
            ContainerLogMaxSize = "string",
            CpuCfsQuota = false,
            CpuCfsQuotaPeriod = "string",
            CpuManagerPolicy = "string",
            ImageGcHighThresholdPercent = 0,
            ImageGcLowThresholdPercent = 0,
            ImageMaximumGcAge = "string",
            ImageMinimumGcAge = "string",
            InsecureKubeletReadonlyPortEnabled = "string",
            PodPidsLimit = 0,
        },
        Labels = 
        {
            { "string", "string" },
        },
        LinuxNodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigArgs
        {
            CgroupMode = "string",
            HugepagesConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs
            {
                HugepageSize1g = 0,
                HugepageSize2m = 0,
            },
            Sysctls = 
            {
                { "string", "string" },
            },
        },
        LocalNvmeSsdBlockConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
        {
            LocalSsdCount = 0,
        },
        LocalSsdCount = 0,
        LocalSsdEncryptionMode = "string",
        LoggingVariant = "string",
        MachineType = "string",
        MaxRunDuration = "string",
        Metadata = 
        {
            { "string", "string" },
        },
        MinCpuPlatform = "string",
        NodeGroup = "string",
        OauthScopes = new[]
        {
            "string",
        },
        Preemptible = false,
        ReservationAffinity = new Gcp.Container.Inputs.NodePoolNodeConfigReservationAffinityArgs
        {
            ConsumeReservationType = "string",
            Key = "string",
            Values = new[]
            {
                "string",
            },
        },
        ResourceLabels = 
        {
            { "string", "string" },
        },
        ResourceManagerTags = 
        {
            { "string", "string" },
        },
        SandboxConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSandboxConfigArgs
        {
            SandboxType = "string",
        },
        SecondaryBootDisks = new[]
        {
            new Gcp.Container.Inputs.NodePoolNodeConfigSecondaryBootDiskArgs
            {
                DiskImage = "string",
                Mode = "string",
            },
        },
        ServiceAccount = "string",
        ShieldedInstanceConfig = new Gcp.Container.Inputs.NodePoolNodeConfigShieldedInstanceConfigArgs
        {
            EnableIntegrityMonitoring = false,
            EnableSecureBoot = false,
        },
        SoleTenantConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigArgs
        {
            NodeAffinities = new[]
            {
                new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
                {
                    Key = "string",
                    Operator = "string",
                    Values = new[]
                    {
                        "string",
                    },
                },
            },
        },
        Spot = false,
        StoragePools = new[]
        {
            "string",
        },
        Tags = new[]
        {
            "string",
        },
        Taints = new[]
        {
            new Gcp.Container.Inputs.NodePoolNodeConfigTaintArgs
            {
                Effect = "string",
                Key = "string",
                Value = "string",
            },
        },
        WorkloadMetadataConfig = new Gcp.Container.Inputs.NodePoolNodeConfigWorkloadMetadataConfigArgs
        {
            Mode = "string",
        },
    },
    Name = "string",
    InitialNodeCount = 0,
    Autoscaling = new Gcp.Container.Inputs.NodePoolAutoscalingArgs
    {
        LocationPolicy = "string",
        MaxNodeCount = 0,
        MinNodeCount = 0,
        TotalMaxNodeCount = 0,
        TotalMinNodeCount = 0,
    },
    MaxPodsPerNode = 0,
    NodeCount = 0,
    NodeLocations = new[]
    {
        "string",
    },
    PlacementPolicy = new Gcp.Container.Inputs.NodePoolPlacementPolicyArgs
    {
        Type = "string",
        PolicyName = "string",
        TpuTopology = "string",
    },
    Project = "string",
    QueuedProvisioning = new Gcp.Container.Inputs.NodePoolQueuedProvisioningArgs
    {
        Enabled = false,
    },
    UpgradeSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsArgs
    {
        BlueGreenSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsArgs
        {
            StandardRolloutPolicy = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
            {
                BatchNodeCount = 0,
                BatchPercentage = 0,
                BatchSoakDuration = "string",
            },
            NodePoolSoakDuration = "string",
        },
        MaxSurge = 0,
        MaxUnavailable = 0,
        Strategy = "string",
    },
    Version = "string",
});
example, err := container.NewNodePool(ctx, "nodePoolResource", &container.NodePoolArgs{
	Cluster: pulumi.String("string"),
	NetworkConfig: &container.NodePoolNetworkConfigArgs{
		AdditionalNodeNetworkConfigs: container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArray{
			&container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs{
				Network:    pulumi.String("string"),
				Subnetwork: pulumi.String("string"),
			},
		},
		AdditionalPodNetworkConfigs: container.NodePoolNetworkConfigAdditionalPodNetworkConfigArray{
			&container.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs{
				MaxPodsPerNode:    pulumi.Int(0),
				SecondaryPodRange: pulumi.String("string"),
				Subnetwork:        pulumi.String("string"),
			},
		},
		CreatePodRange:     pulumi.Bool(false),
		EnablePrivateNodes: pulumi.Bool(false),
		NetworkPerformanceConfig: &container.NodePoolNetworkConfigNetworkPerformanceConfigArgs{
			TotalEgressBandwidthTier: pulumi.String("string"),
		},
		PodCidrOverprovisionConfig: &container.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs{
			Disabled: pulumi.Bool(false),
		},
		PodIpv4CidrBlock: pulumi.String("string"),
		PodRange:         pulumi.String("string"),
	},
	NamePrefix: pulumi.String("string"),
	Location:   pulumi.String("string"),
	Management: &container.NodePoolManagementArgs{
		AutoRepair:  pulumi.Bool(false),
		AutoUpgrade: pulumi.Bool(false),
	},
	NodeConfig: &container.NodePoolNodeConfigArgs{
		AdvancedMachineFeatures: &container.NodePoolNodeConfigAdvancedMachineFeaturesArgs{
			ThreadsPerCore:             pulumi.Int(0),
			EnableNestedVirtualization: pulumi.Bool(false),
		},
		BootDiskKmsKey: pulumi.String("string"),
		ConfidentialNodes: &container.NodePoolNodeConfigConfidentialNodesArgs{
			Enabled: pulumi.Bool(false),
		},
		ContainerdConfig: &container.NodePoolNodeConfigContainerdConfigArgs{
			PrivateRegistryAccessConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs{
				Enabled: pulumi.Bool(false),
				CertificateAuthorityDomainConfigs: container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArray{
					&container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs{
						Fqdns: pulumi.StringArray{
							pulumi.String("string"),
						},
						GcpSecretManagerCertificateConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs{
							SecretUri: pulumi.String("string"),
						},
					},
				},
			},
		},
		DiskSizeGb: pulumi.Int(0),
		DiskType:   pulumi.String("string"),
		EffectiveTaints: container.NodePoolNodeConfigEffectiveTaintArray{
			&container.NodePoolNodeConfigEffectiveTaintArgs{
				Effect: pulumi.String("string"),
				Key:    pulumi.String("string"),
				Value:  pulumi.String("string"),
			},
		},
		EnableConfidentialStorage: pulumi.Bool(false),
		EphemeralStorageConfig: &container.NodePoolNodeConfigEphemeralStorageConfigArgs{
			LocalSsdCount: pulumi.Int(0),
		},
		EphemeralStorageLocalSsdConfig: &container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs{
			LocalSsdCount: pulumi.Int(0),
		},
		FastSocket: &container.NodePoolNodeConfigFastSocketArgs{
			Enabled: pulumi.Bool(false),
		},
		GcfsConfig: &container.NodePoolNodeConfigGcfsConfigArgs{
			Enabled: pulumi.Bool(false),
		},
		GuestAccelerators: container.NodePoolNodeConfigGuestAcceleratorArray{
			&container.NodePoolNodeConfigGuestAcceleratorArgs{
				Count: pulumi.Int(0),
				Type:  pulumi.String("string"),
				GpuDriverInstallationConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs{
					GpuDriverVersion: pulumi.String("string"),
				},
				GpuPartitionSize: pulumi.String("string"),
				GpuSharingConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs{
					GpuSharingStrategy:     pulumi.String("string"),
					MaxSharedClientsPerGpu: pulumi.Int(0),
				},
			},
		},
		Gvnic: &container.NodePoolNodeConfigGvnicArgs{
			Enabled: pulumi.Bool(false),
		},
		HostMaintenancePolicy: &container.NodePoolNodeConfigHostMaintenancePolicyArgs{
			MaintenanceInterval: pulumi.String("string"),
		},
		ImageType: pulumi.String("string"),
		KubeletConfig: &container.NodePoolNodeConfigKubeletConfigArgs{
			AllowedUnsafeSysctls: pulumi.StringArray{
				pulumi.String("string"),
			},
			ContainerLogMaxFiles:               pulumi.Int(0),
			ContainerLogMaxSize:                pulumi.String("string"),
			CpuCfsQuota:                        pulumi.Bool(false),
			CpuCfsQuotaPeriod:                  pulumi.String("string"),
			CpuManagerPolicy:                   pulumi.String("string"),
			ImageGcHighThresholdPercent:        pulumi.Int(0),
			ImageGcLowThresholdPercent:         pulumi.Int(0),
			ImageMaximumGcAge:                  pulumi.String("string"),
			ImageMinimumGcAge:                  pulumi.String("string"),
			InsecureKubeletReadonlyPortEnabled: pulumi.String("string"),
			PodPidsLimit:                       pulumi.Int(0),
		},
		Labels: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		LinuxNodeConfig: &container.NodePoolNodeConfigLinuxNodeConfigArgs{
			CgroupMode: pulumi.String("string"),
			HugepagesConfig: &container.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs{
				HugepageSize1g: pulumi.Int(0),
				HugepageSize2m: pulumi.Int(0),
			},
			Sysctls: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
		},
		LocalNvmeSsdBlockConfig: &container.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs{
			LocalSsdCount: pulumi.Int(0),
		},
		LocalSsdCount:          pulumi.Int(0),
		LocalSsdEncryptionMode: pulumi.String("string"),
		LoggingVariant:         pulumi.String("string"),
		MachineType:            pulumi.String("string"),
		MaxRunDuration:         pulumi.String("string"),
		Metadata: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		MinCpuPlatform: pulumi.String("string"),
		NodeGroup:      pulumi.String("string"),
		OauthScopes: pulumi.StringArray{
			pulumi.String("string"),
		},
		Preemptible: pulumi.Bool(false),
		ReservationAffinity: &container.NodePoolNodeConfigReservationAffinityArgs{
			ConsumeReservationType: pulumi.String("string"),
			Key:                    pulumi.String("string"),
			Values: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		ResourceLabels: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		ResourceManagerTags: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		SandboxConfig: &container.NodePoolNodeConfigSandboxConfigArgs{
			SandboxType: pulumi.String("string"),
		},
		SecondaryBootDisks: container.NodePoolNodeConfigSecondaryBootDiskArray{
			&container.NodePoolNodeConfigSecondaryBootDiskArgs{
				DiskImage: pulumi.String("string"),
				Mode:      pulumi.String("string"),
			},
		},
		ServiceAccount: pulumi.String("string"),
		ShieldedInstanceConfig: &container.NodePoolNodeConfigShieldedInstanceConfigArgs{
			EnableIntegrityMonitoring: pulumi.Bool(false),
			EnableSecureBoot:          pulumi.Bool(false),
		},
		SoleTenantConfig: &container.NodePoolNodeConfigSoleTenantConfigArgs{
			NodeAffinities: container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArray{
				&container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs{
					Key:      pulumi.String("string"),
					Operator: pulumi.String("string"),
					Values: pulumi.StringArray{
						pulumi.String("string"),
					},
				},
			},
		},
		Spot: pulumi.Bool(false),
		StoragePools: pulumi.StringArray{
			pulumi.String("string"),
		},
		Tags: pulumi.StringArray{
			pulumi.String("string"),
		},
		Taints: container.NodePoolNodeConfigTaintArray{
			&container.NodePoolNodeConfigTaintArgs{
				Effect: pulumi.String("string"),
				Key:    pulumi.String("string"),
				Value:  pulumi.String("string"),
			},
		},
		WorkloadMetadataConfig: &container.NodePoolNodeConfigWorkloadMetadataConfigArgs{
			Mode: pulumi.String("string"),
		},
	},
	Name:             pulumi.String("string"),
	InitialNodeCount: pulumi.Int(0),
	Autoscaling: &container.NodePoolAutoscalingArgs{
		LocationPolicy:    pulumi.String("string"),
		MaxNodeCount:      pulumi.Int(0),
		MinNodeCount:      pulumi.Int(0),
		TotalMaxNodeCount: pulumi.Int(0),
		TotalMinNodeCount: pulumi.Int(0),
	},
	MaxPodsPerNode: pulumi.Int(0),
	NodeCount:      pulumi.Int(0),
	NodeLocations: pulumi.StringArray{
		pulumi.String("string"),
	},
	PlacementPolicy: &container.NodePoolPlacementPolicyArgs{
		Type:        pulumi.String("string"),
		PolicyName:  pulumi.String("string"),
		TpuTopology: pulumi.String("string"),
	},
	Project: pulumi.String("string"),
	QueuedProvisioning: &container.NodePoolQueuedProvisioningArgs{
		Enabled: pulumi.Bool(false),
	},
	UpgradeSettings: &container.NodePoolUpgradeSettingsArgs{
		BlueGreenSettings: &container.NodePoolUpgradeSettingsBlueGreenSettingsArgs{
			StandardRolloutPolicy: &container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs{
				BatchNodeCount:    pulumi.Int(0),
				BatchPercentage:   pulumi.Float64(0),
				BatchSoakDuration: pulumi.String("string"),
			},
			NodePoolSoakDuration: pulumi.String("string"),
		},
		MaxSurge:       pulumi.Int(0),
		MaxUnavailable: pulumi.Int(0),
		Strategy:       pulumi.String("string"),
	},
	Version: pulumi.String("string"),
})
var nodePoolResource = new NodePool("nodePoolResource", NodePoolArgs.builder()
    .cluster("string")
    .networkConfig(NodePoolNetworkConfigArgs.builder()
        .additionalNodeNetworkConfigs(NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs.builder()
            .network("string")
            .subnetwork("string")
            .build())
        .additionalPodNetworkConfigs(NodePoolNetworkConfigAdditionalPodNetworkConfigArgs.builder()
            .maxPodsPerNode(0)
            .secondaryPodRange("string")
            .subnetwork("string")
            .build())
        .createPodRange(false)
        .enablePrivateNodes(false)
        .networkPerformanceConfig(NodePoolNetworkConfigNetworkPerformanceConfigArgs.builder()
            .totalEgressBandwidthTier("string")
            .build())
        .podCidrOverprovisionConfig(NodePoolNetworkConfigPodCidrOverprovisionConfigArgs.builder()
            .disabled(false)
            .build())
        .podIpv4CidrBlock("string")
        .podRange("string")
        .build())
    .namePrefix("string")
    .location("string")
    .management(NodePoolManagementArgs.builder()
        .autoRepair(false)
        .autoUpgrade(false)
        .build())
    .nodeConfig(NodePoolNodeConfigArgs.builder()
        .advancedMachineFeatures(NodePoolNodeConfigAdvancedMachineFeaturesArgs.builder()
            .threadsPerCore(0)
            .enableNestedVirtualization(false)
            .build())
        .bootDiskKmsKey("string")
        .confidentialNodes(NodePoolNodeConfigConfidentialNodesArgs.builder()
            .enabled(false)
            .build())
        .containerdConfig(NodePoolNodeConfigContainerdConfigArgs.builder()
            .privateRegistryAccessConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs.builder()
                .enabled(false)
                .certificateAuthorityDomainConfigs(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs.builder()
                    .fqdns("string")
                    .gcpSecretManagerCertificateConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs.builder()
                        .secretUri("string")
                        .build())
                    .build())
                .build())
            .build())
        .diskSizeGb(0)
        .diskType("string")
        .effectiveTaints(NodePoolNodeConfigEffectiveTaintArgs.builder()
            .effect("string")
            .key("string")
            .value("string")
            .build())
        .enableConfidentialStorage(false)
        .ephemeralStorageConfig(NodePoolNodeConfigEphemeralStorageConfigArgs.builder()
            .localSsdCount(0)
            .build())
        .ephemeralStorageLocalSsdConfig(NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs.builder()
            .localSsdCount(0)
            .build())
        .fastSocket(NodePoolNodeConfigFastSocketArgs.builder()
            .enabled(false)
            .build())
        .gcfsConfig(NodePoolNodeConfigGcfsConfigArgs.builder()
            .enabled(false)
            .build())
        .guestAccelerators(NodePoolNodeConfigGuestAcceleratorArgs.builder()
            .count(0)
            .type("string")
            .gpuDriverInstallationConfig(NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs.builder()
                .gpuDriverVersion("string")
                .build())
            .gpuPartitionSize("string")
            .gpuSharingConfig(NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs.builder()
                .gpuSharingStrategy("string")
                .maxSharedClientsPerGpu(0)
                .build())
            .build())
        .gvnic(NodePoolNodeConfigGvnicArgs.builder()
            .enabled(false)
            .build())
        .hostMaintenancePolicy(NodePoolNodeConfigHostMaintenancePolicyArgs.builder()
            .maintenanceInterval("string")
            .build())
        .imageType("string")
        .kubeletConfig(NodePoolNodeConfigKubeletConfigArgs.builder()
            .allowedUnsafeSysctls("string")
            .containerLogMaxFiles(0)
            .containerLogMaxSize("string")
            .cpuCfsQuota(false)
            .cpuCfsQuotaPeriod("string")
            .cpuManagerPolicy("string")
            .imageGcHighThresholdPercent(0)
            .imageGcLowThresholdPercent(0)
            .imageMaximumGcAge("string")
            .imageMinimumGcAge("string")
            .insecureKubeletReadonlyPortEnabled("string")
            .podPidsLimit(0)
            .build())
        .labels(Map.of("string", "string"))
        .linuxNodeConfig(NodePoolNodeConfigLinuxNodeConfigArgs.builder()
            .cgroupMode("string")
            .hugepagesConfig(NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs.builder()
                .hugepageSize1g(0)
                .hugepageSize2m(0)
                .build())
            .sysctls(Map.of("string", "string"))
            .build())
        .localNvmeSsdBlockConfig(NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs.builder()
            .localSsdCount(0)
            .build())
        .localSsdCount(0)
        .localSsdEncryptionMode("string")
        .loggingVariant("string")
        .machineType("string")
        .maxRunDuration("string")
        .metadata(Map.of("string", "string"))
        .minCpuPlatform("string")
        .nodeGroup("string")
        .oauthScopes("string")
        .preemptible(false)
        .reservationAffinity(NodePoolNodeConfigReservationAffinityArgs.builder()
            .consumeReservationType("string")
            .key("string")
            .values("string")
            .build())
        .resourceLabels(Map.of("string", "string"))
        .resourceManagerTags(Map.of("string", "string"))
        .sandboxConfig(NodePoolNodeConfigSandboxConfigArgs.builder()
            .sandboxType("string")
            .build())
        .secondaryBootDisks(NodePoolNodeConfigSecondaryBootDiskArgs.builder()
            .diskImage("string")
            .mode("string")
            .build())
        .serviceAccount("string")
        .shieldedInstanceConfig(NodePoolNodeConfigShieldedInstanceConfigArgs.builder()
            .enableIntegrityMonitoring(false)
            .enableSecureBoot(false)
            .build())
        .soleTenantConfig(NodePoolNodeConfigSoleTenantConfigArgs.builder()
            .nodeAffinities(NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs.builder()
                .key("string")
                .operator("string")
                .values("string")
                .build())
            .build())
        .spot(false)
        .storagePools("string")
        .tags("string")
        .taints(NodePoolNodeConfigTaintArgs.builder()
            .effect("string")
            .key("string")
            .value("string")
            .build())
        .workloadMetadataConfig(NodePoolNodeConfigWorkloadMetadataConfigArgs.builder()
            .mode("string")
            .build())
        .build())
    .name("string")
    .initialNodeCount(0)
    .autoscaling(NodePoolAutoscalingArgs.builder()
        .locationPolicy("string")
        .maxNodeCount(0)
        .minNodeCount(0)
        .totalMaxNodeCount(0)
        .totalMinNodeCount(0)
        .build())
    .maxPodsPerNode(0)
    .nodeCount(0)
    .nodeLocations("string")
    .placementPolicy(NodePoolPlacementPolicyArgs.builder()
        .type("string")
        .policyName("string")
        .tpuTopology("string")
        .build())
    .project("string")
    .queuedProvisioning(NodePoolQueuedProvisioningArgs.builder()
        .enabled(false)
        .build())
    .upgradeSettings(NodePoolUpgradeSettingsArgs.builder()
        .blueGreenSettings(NodePoolUpgradeSettingsBlueGreenSettingsArgs.builder()
            .standardRolloutPolicy(NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs.builder()
                .batchNodeCount(0)
                .batchPercentage(0)
                .batchSoakDuration("string")
                .build())
            .nodePoolSoakDuration("string")
            .build())
        .maxSurge(0)
        .maxUnavailable(0)
        .strategy("string")
        .build())
    .version("string")
    .build());
node_pool_resource = gcp.container.NodePool("nodePoolResource",
    cluster="string",
    network_config={
        "additional_node_network_configs": [{
            "network": "string",
            "subnetwork": "string",
        }],
        "additional_pod_network_configs": [{
            "max_pods_per_node": 0,
            "secondary_pod_range": "string",
            "subnetwork": "string",
        }],
        "create_pod_range": False,
        "enable_private_nodes": False,
        "network_performance_config": {
            "total_egress_bandwidth_tier": "string",
        },
        "pod_cidr_overprovision_config": {
            "disabled": False,
        },
        "pod_ipv4_cidr_block": "string",
        "pod_range": "string",
    },
    name_prefix="string",
    location="string",
    management={
        "auto_repair": False,
        "auto_upgrade": False,
    },
    node_config={
        "advanced_machine_features": {
            "threads_per_core": 0,
            "enable_nested_virtualization": False,
        },
        "boot_disk_kms_key": "string",
        "confidential_nodes": {
            "enabled": False,
        },
        "containerd_config": {
            "private_registry_access_config": {
                "enabled": False,
                "certificate_authority_domain_configs": [{
                    "fqdns": ["string"],
                    "gcp_secret_manager_certificate_config": {
                        "secret_uri": "string",
                    },
                }],
            },
        },
        "disk_size_gb": 0,
        "disk_type": "string",
        "effective_taints": [{
            "effect": "string",
            "key": "string",
            "value": "string",
        }],
        "enable_confidential_storage": False,
        "ephemeral_storage_config": {
            "local_ssd_count": 0,
        },
        "ephemeral_storage_local_ssd_config": {
            "local_ssd_count": 0,
        },
        "fast_socket": {
            "enabled": False,
        },
        "gcfs_config": {
            "enabled": False,
        },
        "guest_accelerators": [{
            "count": 0,
            "type": "string",
            "gpu_driver_installation_config": {
                "gpu_driver_version": "string",
            },
            "gpu_partition_size": "string",
            "gpu_sharing_config": {
                "gpu_sharing_strategy": "string",
                "max_shared_clients_per_gpu": 0,
            },
        }],
        "gvnic": {
            "enabled": False,
        },
        "host_maintenance_policy": {
            "maintenance_interval": "string",
        },
        "image_type": "string",
        "kubelet_config": {
            "allowed_unsafe_sysctls": ["string"],
            "container_log_max_files": 0,
            "container_log_max_size": "string",
            "cpu_cfs_quota": False,
            "cpu_cfs_quota_period": "string",
            "cpu_manager_policy": "string",
            "image_gc_high_threshold_percent": 0,
            "image_gc_low_threshold_percent": 0,
            "image_maximum_gc_age": "string",
            "image_minimum_gc_age": "string",
            "insecure_kubelet_readonly_port_enabled": "string",
            "pod_pids_limit": 0,
        },
        "labels": {
            "string": "string",
        },
        "linux_node_config": {
            "cgroup_mode": "string",
            "hugepages_config": {
                "hugepage_size1g": 0,
                "hugepage_size2m": 0,
            },
            "sysctls": {
                "string": "string",
            },
        },
        "local_nvme_ssd_block_config": {
            "local_ssd_count": 0,
        },
        "local_ssd_count": 0,
        "local_ssd_encryption_mode": "string",
        "logging_variant": "string",
        "machine_type": "string",
        "max_run_duration": "string",
        "metadata": {
            "string": "string",
        },
        "min_cpu_platform": "string",
        "node_group": "string",
        "oauth_scopes": ["string"],
        "preemptible": False,
        "reservation_affinity": {
            "consume_reservation_type": "string",
            "key": "string",
            "values": ["string"],
        },
        "resource_labels": {
            "string": "string",
        },
        "resource_manager_tags": {
            "string": "string",
        },
        "sandbox_config": {
            "sandbox_type": "string",
        },
        "secondary_boot_disks": [{
            "disk_image": "string",
            "mode": "string",
        }],
        "service_account": "string",
        "shielded_instance_config": {
            "enable_integrity_monitoring": False,
            "enable_secure_boot": False,
        },
        "sole_tenant_config": {
            "node_affinities": [{
                "key": "string",
                "operator": "string",
                "values": ["string"],
            }],
        },
        "spot": False,
        "storage_pools": ["string"],
        "tags": ["string"],
        "taints": [{
            "effect": "string",
            "key": "string",
            "value": "string",
        }],
        "workload_metadata_config": {
            "mode": "string",
        },
    },
    name="string",
    initial_node_count=0,
    autoscaling={
        "location_policy": "string",
        "max_node_count": 0,
        "min_node_count": 0,
        "total_max_node_count": 0,
        "total_min_node_count": 0,
    },
    max_pods_per_node=0,
    node_count=0,
    node_locations=["string"],
    placement_policy={
        "type": "string",
        "policy_name": "string",
        "tpu_topology": "string",
    },
    project="string",
    queued_provisioning={
        "enabled": False,
    },
    upgrade_settings={
        "blue_green_settings": {
            "standard_rollout_policy": {
                "batch_node_count": 0,
                "batch_percentage": 0,
                "batch_soak_duration": "string",
            },
            "node_pool_soak_duration": "string",
        },
        "max_surge": 0,
        "max_unavailable": 0,
        "strategy": "string",
    },
    version="string")
const nodePoolResource = new gcp.container.NodePool("nodePoolResource", {
    cluster: "string",
    networkConfig: {
        additionalNodeNetworkConfigs: [{
            network: "string",
            subnetwork: "string",
        }],
        additionalPodNetworkConfigs: [{
            maxPodsPerNode: 0,
            secondaryPodRange: "string",
            subnetwork: "string",
        }],
        createPodRange: false,
        enablePrivateNodes: false,
        networkPerformanceConfig: {
            totalEgressBandwidthTier: "string",
        },
        podCidrOverprovisionConfig: {
            disabled: false,
        },
        podIpv4CidrBlock: "string",
        podRange: "string",
    },
    namePrefix: "string",
    location: "string",
    management: {
        autoRepair: false,
        autoUpgrade: false,
    },
    nodeConfig: {
        advancedMachineFeatures: {
            threadsPerCore: 0,
            enableNestedVirtualization: false,
        },
        bootDiskKmsKey: "string",
        confidentialNodes: {
            enabled: false,
        },
        containerdConfig: {
            privateRegistryAccessConfig: {
                enabled: false,
                certificateAuthorityDomainConfigs: [{
                    fqdns: ["string"],
                    gcpSecretManagerCertificateConfig: {
                        secretUri: "string",
                    },
                }],
            },
        },
        diskSizeGb: 0,
        diskType: "string",
        effectiveTaints: [{
            effect: "string",
            key: "string",
            value: "string",
        }],
        enableConfidentialStorage: false,
        ephemeralStorageConfig: {
            localSsdCount: 0,
        },
        ephemeralStorageLocalSsdConfig: {
            localSsdCount: 0,
        },
        fastSocket: {
            enabled: false,
        },
        gcfsConfig: {
            enabled: false,
        },
        guestAccelerators: [{
            count: 0,
            type: "string",
            gpuDriverInstallationConfig: {
                gpuDriverVersion: "string",
            },
            gpuPartitionSize: "string",
            gpuSharingConfig: {
                gpuSharingStrategy: "string",
                maxSharedClientsPerGpu: 0,
            },
        }],
        gvnic: {
            enabled: false,
        },
        hostMaintenancePolicy: {
            maintenanceInterval: "string",
        },
        imageType: "string",
        kubeletConfig: {
            allowedUnsafeSysctls: ["string"],
            containerLogMaxFiles: 0,
            containerLogMaxSize: "string",
            cpuCfsQuota: false,
            cpuCfsQuotaPeriod: "string",
            cpuManagerPolicy: "string",
            imageGcHighThresholdPercent: 0,
            imageGcLowThresholdPercent: 0,
            imageMaximumGcAge: "string",
            imageMinimumGcAge: "string",
            insecureKubeletReadonlyPortEnabled: "string",
            podPidsLimit: 0,
        },
        labels: {
            string: "string",
        },
        linuxNodeConfig: {
            cgroupMode: "string",
            hugepagesConfig: {
                hugepageSize1g: 0,
                hugepageSize2m: 0,
            },
            sysctls: {
                string: "string",
            },
        },
        localNvmeSsdBlockConfig: {
            localSsdCount: 0,
        },
        localSsdCount: 0,
        localSsdEncryptionMode: "string",
        loggingVariant: "string",
        machineType: "string",
        maxRunDuration: "string",
        metadata: {
            string: "string",
        },
        minCpuPlatform: "string",
        nodeGroup: "string",
        oauthScopes: ["string"],
        preemptible: false,
        reservationAffinity: {
            consumeReservationType: "string",
            key: "string",
            values: ["string"],
        },
        resourceLabels: {
            string: "string",
        },
        resourceManagerTags: {
            string: "string",
        },
        sandboxConfig: {
            sandboxType: "string",
        },
        secondaryBootDisks: [{
            diskImage: "string",
            mode: "string",
        }],
        serviceAccount: "string",
        shieldedInstanceConfig: {
            enableIntegrityMonitoring: false,
            enableSecureBoot: false,
        },
        soleTenantConfig: {
            nodeAffinities: [{
                key: "string",
                operator: "string",
                values: ["string"],
            }],
        },
        spot: false,
        storagePools: ["string"],
        tags: ["string"],
        taints: [{
            effect: "string",
            key: "string",
            value: "string",
        }],
        workloadMetadataConfig: {
            mode: "string",
        },
    },
    name: "string",
    initialNodeCount: 0,
    autoscaling: {
        locationPolicy: "string",
        maxNodeCount: 0,
        minNodeCount: 0,
        totalMaxNodeCount: 0,
        totalMinNodeCount: 0,
    },
    maxPodsPerNode: 0,
    nodeCount: 0,
    nodeLocations: ["string"],
    placementPolicy: {
        type: "string",
        policyName: "string",
        tpuTopology: "string",
    },
    project: "string",
    queuedProvisioning: {
        enabled: false,
    },
    upgradeSettings: {
        blueGreenSettings: {
            standardRolloutPolicy: {
                batchNodeCount: 0,
                batchPercentage: 0,
                batchSoakDuration: "string",
            },
            nodePoolSoakDuration: "string",
        },
        maxSurge: 0,
        maxUnavailable: 0,
        strategy: "string",
    },
    version: "string",
});
type: gcp:container:NodePool
properties:
    autoscaling:
        locationPolicy: string
        maxNodeCount: 0
        minNodeCount: 0
        totalMaxNodeCount: 0
        totalMinNodeCount: 0
    cluster: string
    initialNodeCount: 0
    location: string
    management:
        autoRepair: false
        autoUpgrade: false
    maxPodsPerNode: 0
    name: string
    namePrefix: string
    networkConfig:
        additionalNodeNetworkConfigs:
            - network: string
              subnetwork: string
        additionalPodNetworkConfigs:
            - maxPodsPerNode: 0
              secondaryPodRange: string
              subnetwork: string
        createPodRange: false
        enablePrivateNodes: false
        networkPerformanceConfig:
            totalEgressBandwidthTier: string
        podCidrOverprovisionConfig:
            disabled: false
        podIpv4CidrBlock: string
        podRange: string
    nodeConfig:
        advancedMachineFeatures:
            enableNestedVirtualization: false
            threadsPerCore: 0
        bootDiskKmsKey: string
        confidentialNodes:
            enabled: false
        containerdConfig:
            privateRegistryAccessConfig:
                certificateAuthorityDomainConfigs:
                    - fqdns:
                        - string
                      gcpSecretManagerCertificateConfig:
                        secretUri: string
                enabled: false
        diskSizeGb: 0
        diskType: string
        effectiveTaints:
            - effect: string
              key: string
              value: string
        enableConfidentialStorage: false
        ephemeralStorageConfig:
            localSsdCount: 0
        ephemeralStorageLocalSsdConfig:
            localSsdCount: 0
        fastSocket:
            enabled: false
        gcfsConfig:
            enabled: false
        guestAccelerators:
            - count: 0
              gpuDriverInstallationConfig:
                gpuDriverVersion: string
              gpuPartitionSize: string
              gpuSharingConfig:
                gpuSharingStrategy: string
                maxSharedClientsPerGpu: 0
              type: string
        gvnic:
            enabled: false
        hostMaintenancePolicy:
            maintenanceInterval: string
        imageType: string
        kubeletConfig:
            allowedUnsafeSysctls:
                - string
            containerLogMaxFiles: 0
            containerLogMaxSize: string
            cpuCfsQuota: false
            cpuCfsQuotaPeriod: string
            cpuManagerPolicy: string
            imageGcHighThresholdPercent: 0
            imageGcLowThresholdPercent: 0
            imageMaximumGcAge: string
            imageMinimumGcAge: string
            insecureKubeletReadonlyPortEnabled: string
            podPidsLimit: 0
        labels:
            string: string
        linuxNodeConfig:
            cgroupMode: string
            hugepagesConfig:
                hugepageSize1g: 0
                hugepageSize2m: 0
            sysctls:
                string: string
        localNvmeSsdBlockConfig:
            localSsdCount: 0
        localSsdCount: 0
        localSsdEncryptionMode: string
        loggingVariant: string
        machineType: string
        maxRunDuration: string
        metadata:
            string: string
        minCpuPlatform: string
        nodeGroup: string
        oauthScopes:
            - string
        preemptible: false
        reservationAffinity:
            consumeReservationType: string
            key: string
            values:
                - string
        resourceLabels:
            string: string
        resourceManagerTags:
            string: string
        sandboxConfig:
            sandboxType: string
        secondaryBootDisks:
            - diskImage: string
              mode: string
        serviceAccount: string
        shieldedInstanceConfig:
            enableIntegrityMonitoring: false
            enableSecureBoot: false
        soleTenantConfig:
            nodeAffinities:
                - key: string
                  operator: string
                  values:
                    - string
        spot: false
        storagePools:
            - string
        tags:
            - string
        taints:
            - effect: string
              key: string
              value: string
        workloadMetadataConfig:
            mode: string
    nodeCount: 0
    nodeLocations:
        - string
    placementPolicy:
        policyName: string
        tpuTopology: string
        type: string
    project: string
    queuedProvisioning:
        enabled: false
    upgradeSettings:
        blueGreenSettings:
            nodePoolSoakDuration: string
            standardRolloutPolicy:
                batchNodeCount: 0
                batchPercentage: 0
                batchSoakDuration: string
        maxSurge: 0
        maxUnavailable: 0
        strategy: string
    version: string
NodePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The NodePool resource accepts the following input properties:
- Cluster string
- The cluster to create the node pool for. Cluster must be present in `location` provided for clusters. May be specified in the format `projects/{{project}}/locations/{{location}}/clusters/{{cluster}}` or as just the name of the cluster.
- Autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- Location string
- The location (region or zone) of the cluster.
- Management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- NetworkConfig NodePool Network Config 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- NodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations List<string>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level `node_locations` will be used. Note: `node_locations` will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and `auto_upgrade` are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the `gcp.container.getEngineVersions` data source's `version_prefix` field to approximate fuzzy versions in a provider-compatible way.
- Cluster string
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- Autoscaling
NodePool Autoscaling Args 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- Location string
- The location (region or zone) of the cluster.
- Management
NodePool Management Args 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- NetworkConfig NodePool Network Config Args 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- NodeConfig NodePool Node Config Args 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations []string
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePool Placement Policy Args 
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePool Queued Provisioning Args 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePool Upgrade Settings Args 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
- cluster String
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initialNodeCount Integer
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- location String
- The location (region or zone) of the cluster.
- management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- maxPodsPerNode Integer
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix String
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig NodePool Network Config 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- nodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount Integer
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations List<String>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- placementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
- cluster string
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initialNodeCount number
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- location string
- The location (region or zone) of the cluster.
- management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- maxPodsPerNode number
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig NodePool Network Config 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- nodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount number
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations string[]
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- placementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
- cluster str
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- autoscaling
NodePool Autoscaling Args 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial_node_count int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- location str
- The location (region or zone) of the cluster.
- management
NodePool Management Args 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max_pods_per_node int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_prefix str
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- network_config NodePool Network Config Args 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- node_config NodePool Node Config Args 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_count int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- node_locations Sequence[str]
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- placement_policy NodePool Placement Policy Args 
- Specifies a custom placement policy for the nodes.
- project str
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_provisioning NodePool Queued Provisioning Args 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade_settings NodePool Upgrade Settings Args 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
- cluster String
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initialNodeCount Number
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- location String
- The location (region or zone) of the cluster.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- maxPodsPerNode Number
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix String
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig Property Map
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- nodeConfig Property Map
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount Number
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations List<String>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- placementPolicy Property Map
- Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning Property Map
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings Property Map
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
Outputs
All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- InstanceGroupUrls List<string>
- The resource URLs of the managed instance groups associated with this node pool.
- ManagedInstanceGroupUrls List<string>
- List of instance group URLs which have been assigned to this node pool.
- Operation string
- Id string
- The provider-assigned unique ID for this managed resource.
- InstanceGroupUrls []string
- The resource URLs of the managed instance groups associated with this node pool.
- ManagedInstanceGroupUrls []string
- List of instance group URLs which have been assigned to this node pool.
- Operation string
- id String
- The provider-assigned unique ID for this managed resource.
- instanceGroupUrls List<String>
- The resource URLs of the managed instance groups associated with this node pool.
- managedInstanceGroupUrls List<String>
- List of instance group URLs which have been assigned to this node pool.
- operation String
- id string
- The provider-assigned unique ID for this managed resource.
- instanceGroupUrls string[]
- The resource URLs of the managed instance groups associated with this node pool.
- managedInstanceGroupUrls string[]
- List of instance group URLs which have been assigned to this node pool.
- operation string
- id str
- The provider-assigned unique ID for this managed resource.
- instance_group_urls Sequence[str]
- The resource URLs of the managed instance groups associated with this node pool.
- managed_instance_group_urls Sequence[str]
- List of instance group URLs which have been assigned to this node pool.
- operation str
- id String
- The provider-assigned unique ID for this managed resource.
- instanceGroupUrls List<String>
- The resource URLs of the managed instance groups associated with this node pool.
- managedInstanceGroupUrls List<String>
- List of instance group URLs which have been assigned to this node pool.
- operation String
Look up Existing NodePool Resource
Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        autoscaling: Optional[NodePoolAutoscalingArgs] = None,
        cluster: Optional[str] = None,
        initial_node_count: Optional[int] = None,
        instance_group_urls: Optional[Sequence[str]] = None,
        location: Optional[str] = None,
        managed_instance_group_urls: Optional[Sequence[str]] = None,
        management: Optional[NodePoolManagementArgs] = None,
        max_pods_per_node: Optional[int] = None,
        name: Optional[str] = None,
        name_prefix: Optional[str] = None,
        network_config: Optional[NodePoolNetworkConfigArgs] = None,
        node_config: Optional[NodePoolNodeConfigArgs] = None,
        node_count: Optional[int] = None,
        node_locations: Optional[Sequence[str]] = None,
        operation: Optional[str] = None,
        placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
        project: Optional[str] = None,
        queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
        upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
        version: Optional[str] = None) -> NodePoolfunc GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)resources:  _:    type: gcp:container:NodePool    get:      id: ${id}- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- InstanceGroupUrls List<string>
- The resource URLs of the managed instance groups associated with this node pool.
- Location string
- The location (region or zone) of the cluster.
- ManagedInstanceGroupUrls List<string>
- List of instance group URLs which have been assigned to this node pool.
- Management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- NetworkConfig NodePool Network Config 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- NodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations List<string>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- Operation string
- PlacementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgradeare both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersionsdata source'sversion_prefixfield to approximate fuzzy versions in a provider-compatible way.
- Autoscaling
NodePool Autoscaling Args 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
- The cluster to create the node pool for. Cluster must be present in locationprovided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}or as just the name of the cluster.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- InstanceGroupUrls []string
- The resource URLs of the managed instance groups associated with this node pool.
- Location string
- The location (region or zone) of the cluster.
- ManagedInstanceGroupUrls []string
- List of instance group URLs which have been assigned to this node pool.
- Management
NodePool Management Args 
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- NetworkConfig NodePool Network Config Args 
- The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- NodeConfig NodePool Node Config Args 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations []string
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level - node_locationswill be used.- Note: - node_locationswill not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- Operation string
- PlacementPolicy NodePool Placement Policy Args 
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePool Queued Provisioning Args 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePool Upgrade Settings Args 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
- The cluster to create the node pool for. Cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- initialNode IntegerCount 
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- instanceGroup List<String>Urls 
- The resource URLs of the managed instance groups associated with this node pool.
- location String
- The location (region or zone) of the cluster.
- managedInstance List<String>Group Urls 
- List of instance group URLs which have been assigned to this node pool.
- management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- maxPods IntegerPer Node 
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix String
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig NodePool Network Config 
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- nodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount Integer
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations List<String>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- operation String
- placementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
NodePool Autoscaling 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster string
- The cluster to create the node pool for. Cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- initialNode numberCount 
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- instanceGroup string[]Urls 
- The resource URLs of the managed instance groups associated with this node pool.
- location string
- The location (region or zone) of the cluster.
- managedInstance string[]Group Urls 
- List of instance group URLs which have been assigned to this node pool.
- management
NodePool Management 
- Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- maxPods numberPer Node 
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix string
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig NodePool Network Config 
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- nodeConfig NodePool Node Config 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount number
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations string[]
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- operation string
- placementPolicy NodePool Placement Policy 
- Specifies a custom placement policy for the nodes.
- project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning NodePool Queued Provisioning 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings NodePool Upgrade Settings 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
NodePool Autoscaling Args 
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster str
- The cluster to create the node pool for. Cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- initial_node_ intcount 
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- instance_group_ Sequence[str]urls 
- The resource URLs of the managed instance groups associated with this node pool.
- location str
- The location (region or zone) of the cluster.
- managed_instance_ Sequence[str]group_ urls 
- List of instance group URLs which have been assigned to this node pool.
- management
NodePool Management Args 
- Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max_pods_ intper_ node 
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_prefix str
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- network_config NodePool Network Config Args 
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node_config NodePool Node Config Args 
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_count int
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- node_locations Sequence[str]
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- operation str
- placement_policy NodePool Placement Policy Args 
- Specifies a custom placement policy for the nodes.
- project str
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_provisioning NodePool Queued Provisioning Args 
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade_settings NodePool Upgrade Settings Args 
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
- autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
- The cluster to create the node pool for. Cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- initialNode NumberCount 
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
- instanceGroup List<String>Urls 
- The resource URLs of the managed instance groups associated with this node pool.
- location String
- The location (region or zone) of the cluster.
- managedInstance List<String>Group Urls 
- List of instance group URLs which have been assigned to this node pool.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- maxPods NumberPer Node 
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix String
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with name.
- networkConfig Property Map
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- nodeConfig Property Map
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount Number
- The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations List<String>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- operation String
- placementPolicy Property Map
- Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queuedProvisioning Property Map
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgradeSettings Property Map
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
Supporting Types
NodePoolAutoscaling, NodePoolAutoscalingArgs      
- LocationPolicy string
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- MaxNodeCount int
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- MinNodeCount int
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- TotalMaxNodeCount int
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- TotalMinNodeCount int
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- LocationPolicy string
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- MaxNode intCount 
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- MinNode intCount 
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- TotalMax intNode Count 
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- TotalMin intNode Count 
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy String
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- maxNode IntegerCount 
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNode IntegerCount 
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- totalMax IntegerNode Count 
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMin IntegerNode Count 
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy string
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- maxNode numberCount 
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNode numberCount 
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- totalMax numberNode Count 
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMin numberNode Count 
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location_policy str
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- max_node_ intcount 
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min_node_ intcount 
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- total_max_ intnode_ count 
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total_min_ intnode_ count 
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy String
- Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
 
- maxNode NumberCount 
- Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNode NumberCount 
- Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= max_node_count. Cannot be used with total limits.
- totalMax NumberNode Count 
- Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMin NumberNode Count 
- Total minimum number of nodes in the NodePool.
Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
NodePoolManagement, NodePoolManagementArgs      
- AutoRepair bool
- Whether the nodes will be automatically repaired. Enabled by default.
- AutoUpgrade bool
- Whether the nodes will be automatically upgraded. Enabled by default.
- AutoRepair bool
- Whether the nodes will be automatically repaired. Enabled by default.
- AutoUpgrade bool
- Whether the nodes will be automatically upgraded. Enabled by default.
- autoRepair Boolean
- Whether the nodes will be automatically repaired. Enabled by default.
- autoUpgrade Boolean
- Whether the nodes will be automatically upgraded. Enabled by default.
- autoRepair boolean
- Whether the nodes will be automatically repaired. Enabled by default.
- autoUpgrade boolean
- Whether the nodes will be automatically upgraded. Enabled by default.
- auto_repair bool
- Whether the nodes will be automatically repaired. Enabled by default.
- auto_upgrade bool
- Whether the nodes will be automatically upgraded. Enabled by default.
- autoRepair Boolean
- Whether the nodes will be automatically repaired. Enabled by default.
- autoUpgrade Boolean
- Whether the nodes will be automatically upgraded. Enabled by default.
NodePoolNetworkConfig, NodePoolNetworkConfigArgs        
- AdditionalNode List<NodeNetwork Configs Pool Network Config Additional Node Network Config> 
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- AdditionalPod List<NodeNetwork Configs Pool Network Config Additional Pod Network Config> 
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- CreatePod boolRange 
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- EnablePrivate boolNodes 
- Whether nodes have internal IP addresses only.
- NetworkPerformance NodeConfig Pool Network Config Network Performance Config 
- Network bandwidth tier configuration. Structure is documented below.
- PodCidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config 
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- PodIpv4Cidr stringBlock 
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- PodRange string
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- AdditionalNode []NodeNetwork Configs Pool Network Config Additional Node Network Config 
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- AdditionalPod []NodeNetwork Configs Pool Network Config Additional Pod Network Config 
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- CreatePod boolRange 
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- EnablePrivate boolNodes 
- Whether nodes have internal IP addresses only.
- NetworkPerformance NodeConfig Pool Network Config Network Performance Config 
- Network bandwidth tier configuration. Structure is documented below.
- PodCidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config 
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- PodIpv4Cidr stringBlock 
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- PodRange string
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- additionalNode List<NodeNetwork Configs Pool Network Config Additional Node Network Config> 
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- additionalPod List<NodeNetwork Configs Pool Network Config Additional Pod Network Config> 
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- createPod BooleanRange 
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivate BooleanNodes 
- Whether nodes have internal IP addresses only.
- networkPerformance NodeConfig Pool Network Config Network Performance Config 
- Network bandwidth tier configuration. Structure is documented below.
- podCidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config 
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- podIpv4Cidr StringBlock 
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange String
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- additionalNode NodeNetwork Configs Pool Network Config Additional Node Network Config[] 
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- additionalPod NodeNetwork Configs Pool Network Config Additional Pod Network Config[] 
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- createPod booleanRange 
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivate booleanNodes 
- Whether nodes have internal IP addresses only.
- networkPerformance NodeConfig Pool Network Config Network Performance Config 
- Network bandwidth tier configuration. Structure is documented below.
- podCidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config 
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- podIpv4Cidr stringBlock 
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange string
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- additional_node_network_configs Sequence[NodePoolNetworkConfigAdditionalNodeNetworkConfig]
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- additional_pod_network_configs Sequence[NodePoolNetworkConfigAdditionalPodNetworkConfig]
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- create_pod_range bool
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enable_private_nodes bool
- Whether nodes have internal IP addresses only.
- network_performance_config NodePoolNetworkConfigNetworkPerformanceConfig
- Network bandwidth tier configuration. Structure is documented below.
- pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- pod_ipv4_cidr_block str
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod_range str
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- additionalNodeNetworkConfigs List<Property Map>
- We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- additionalPodNetworkConfigs List<Property Map>
- We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- createPodRange Boolean
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivateNodes Boolean
- Whether nodes have internal IP addresses only.
- networkPerformanceConfig Property Map
- Network bandwidth tier configuration. Structure is documented below.
- podCidrOverprovisionConfig Property Map
- Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- podIpv4CidrBlock String
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange String
- The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
NodePoolNetworkConfigAdditionalNodeNetworkConfig, NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs                
- Network string
- Name of the VPC where the additional interface belongs.
- Subnetwork string
- Name of the subnetwork where the additional interface belongs.
- Network string
- Name of the VPC where the additional interface belongs.
- Subnetwork string
- Name of the subnetwork where the additional interface belongs.
- network String
- Name of the VPC where the additional interface belongs.
- subnetwork String
- Name of the subnetwork where the additional interface belongs.
- network string
- Name of the VPC where the additional interface belongs.
- subnetwork string
- Name of the subnetwork where the additional interface belongs.
- network str
- Name of the VPC where the additional interface belongs.
- subnetwork str
- Name of the subnetwork where the additional interface belongs.
- network String
- Name of the VPC where the additional interface belongs.
- subnetwork String
- Name of the subnetwork where the additional interface belongs.
NodePoolNetworkConfigAdditionalPodNetworkConfig, NodePoolNetworkConfigAdditionalPodNetworkConfigArgs                
- MaxPodsPerNode int
- The maximum number of pods per node which use this pod network.
- SecondaryPodRange string
- The name of the secondary range on the subnet which provides IP address for this pod range.
- Subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- MaxPodsPerNode int
- The maximum number of pods per node which use this pod network.
- SecondaryPodRange string
- The name of the secondary range on the subnet which provides IP address for this pod range.
- Subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- maxPodsPerNode Integer
- The maximum number of pods per node which use this pod network.
- secondaryPodRange String
- The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork String
- Name of the subnetwork where the additional pod network belongs.
- maxPodsPerNode number
- The maximum number of pods per node which use this pod network.
- secondaryPodRange string
- The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- max_pods_per_node int
- The maximum number of pods per node which use this pod network.
- secondary_pod_range str
- The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork str
- Name of the subnetwork where the additional pod network belongs.
- maxPodsPerNode Number
- The maximum number of pods per node which use this pod network.
- secondaryPodRange String
- The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork String
- Name of the subnetwork where the additional pod network belongs.
NodePoolNetworkConfigNetworkPerformanceConfig, NodePoolNetworkConfigNetworkPerformanceConfigArgs              
- TotalEgressBandwidthTier string
- Specifies the total network bandwidth tier for the NodePool.
- TotalEgressBandwidthTier string
- Specifies the total network bandwidth tier for the NodePool.
- totalEgressBandwidthTier String
- Specifies the total network bandwidth tier for the NodePool.
- totalEgressBandwidthTier string
- Specifies the total network bandwidth tier for the NodePool.
- total_egress_bandwidth_tier str
- Specifies the total network bandwidth tier for the NodePool.
- totalEgressBandwidthTier String
- Specifies the total network bandwidth tier for the NodePool.
NodePoolNetworkConfigPodCidrOverprovisionConfig, NodePoolNetworkConfigPodCidrOverprovisionConfigArgs                
- Disabled bool
- Whether pod cidr overprovision is disabled.
- Disabled bool
- Whether pod cidr overprovision is disabled.
- disabled Boolean
- Whether pod cidr overprovision is disabled.
- disabled boolean
- Whether pod cidr overprovision is disabled.
- disabled bool
- Whether pod cidr overprovision is disabled.
- disabled Boolean
- Whether pod cidr overprovision is disabled.
NodePoolNodeConfig, NodePoolNodeConfigArgs        
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- Specifies options for controlling advanced machine features.
- BootDiskKmsKey string
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- ConfidentialNodes NodePoolNodeConfigConfidentialNodes
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- ContainerdConfig NodePoolNodeConfigContainerdConfig
- Parameters for containerd configuration.
- DiskSizeGb int
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- DiskType string
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- EffectiveTaints List<NodePoolNodeConfigEffectiveTaint>
- List of kubernetes taints applied to each node.
- EnableConfidentialStorage bool
- If enabled boot disks are configured with confidential mode.
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- FastSocket NodePoolNodeConfigFastSocket
- Enable or disable NCCL Fast Socket in the node pool.
- GcfsConfig NodePoolNodeConfigGcfsConfig
- GCFS configuration for this node.
- GuestAccelerators List<NodePoolNodeConfigGuestAccelerator>
- List of the type and count of accelerator cards attached to the instance.
- Gvnic NodePoolNodeConfigGvnic
- Enable or disable gvnic in the node pool.
- HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
- The maintenance policy for the hosts on which the GKE VMs run on.
- ImageType string
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- KubeletConfig NodePoolNodeConfigKubeletConfig
- Node kubelet configs.
- Labels Dictionary<string, string>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- Parameters that can be configured on Linux nodes.
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- Parameters for raw-block local NVMe SSDs.
- LocalSsdCount int
- The number of local SSD disks to be attached to the node.
- LocalSsdEncryptionMode string
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- LoggingVariant string
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- MachineType string
- The name of a Google Compute Engine machine type.
- MaxRunDuration string
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- Metadata Dictionary<string, string>
- The metadata key/value pairs assigned to instances in the cluster.
- MinCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- NodeGroup string
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- OauthScopes List<string>
- The set of Google API scopes to be made available on all of the node VMs.
- Preemptible bool
- Whether the nodes are created as preemptible VM instances.
- ReservationAffinity NodePoolNodeConfigReservationAffinity
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- ResourceLabels Dictionary<string, string>
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- ResourceManagerTags Dictionary<string, string>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- SandboxConfig NodePoolNodeConfigSandboxConfig
- Sandbox configuration for this node.
- SecondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk>
- Secondary boot disks for preloading data or container images.
- ServiceAccount string
- The Google Cloud Platform Service Account to be used by the node VMs.
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Shielded Instance options.
- SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
- Node affinity options for sole tenant node pools.
- Spot bool
- Whether the nodes are created as spot VM instances.
- StoragePools List<string>
- The list of Storage Pools where boot disks are provisioned.
- Tags List<string>
- The list of instance tags applied to all nodes.
- Taints List<NodePoolNodeConfigTaint>
- List of Kubernetes taints to be applied to each node.
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- The workload metadata configuration for this node.
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- Specifies options for controlling advanced machine features.
- BootDiskKmsKey string
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- ConfidentialNodes NodePoolNodeConfigConfidentialNodes
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- ContainerdConfig NodePoolNodeConfigContainerdConfig
- Parameters for containerd configuration.
- DiskSizeGb int
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- DiskType string
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- EffectiveTaints []NodePoolNodeConfigEffectiveTaint
- List of kubernetes taints applied to each node.
- EnableConfidentialStorage bool
- If enabled boot disks are configured with confidential mode.
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- FastSocket NodePoolNodeConfigFastSocket
- Enable or disable NCCL Fast Socket in the node pool.
- GcfsConfig NodePoolNodeConfigGcfsConfig
- GCFS configuration for this node.
- GuestAccelerators []NodePoolNodeConfigGuestAccelerator
- List of the type and count of accelerator cards attached to the instance.
- Gvnic NodePoolNodeConfigGvnic
- Enable or disable gvnic in the node pool.
- HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
- The maintenance policy for the hosts on which the GKE VMs run on.
- ImageType string
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- KubeletConfig NodePoolNodeConfigKubeletConfig
- Node kubelet configs.
- Labels map[string]string
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- Parameters that can be configured on Linux nodes.
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- Parameters for raw-block local NVMe SSDs.
- LocalSsdCount int
- The number of local SSD disks to be attached to the node.
- LocalSsdEncryptionMode string
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- LoggingVariant string
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- MachineType string
- The name of a Google Compute Engine machine type.
- MaxRunDuration string
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- Metadata map[string]string
- The metadata key/value pairs assigned to instances in the cluster.
- MinCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- NodeGroup string
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- OauthScopes []string
- The set of Google API scopes to be made available on all of the node VMs.
- Preemptible bool
- Whether the nodes are created as preemptible VM instances.
- ReservationAffinity NodePoolNodeConfigReservationAffinity
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- ResourceLabels map[string]string
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- ResourceManagerTags map[string]string
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- SandboxConfig NodePoolNodeConfigSandboxConfig
- Sandbox configuration for this node.
- SecondaryBootDisks []NodePoolNodeConfigSecondaryBootDisk
- Secondary boot disks for preloading data or container images.
- ServiceAccount string
- The Google Cloud Platform Service Account to be used by the node VMs.
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Shielded Instance options.
- SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
- Node affinity options for sole tenant node pools.
- Spot bool
- Whether the nodes are created as spot VM instances.
- StoragePools []string
- The list of Storage Pools where boot disks are provisioned.
- Tags []string
- The list of instance tags applied to all nodes.
- Taints []NodePoolNodeConfigTaint
- List of Kubernetes taints to be applied to each node.
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- The workload metadata configuration for this node.
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- Specifies options for controlling advanced machine features.
- bootDiskKmsKey String
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidentialNodes NodePoolNodeConfigConfidentialNodes
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- containerdConfig NodePoolNodeConfigContainerdConfig
- Parameters for containerd configuration.
- diskSizeGb Integer
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType String
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effectiveTaints List<NodePoolNodeConfigEffectiveTaint>
- List of kubernetes taints applied to each node.
- enableConfidentialStorage Boolean
- If enabled boot disks are configured with confidential mode.
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fastSocket NodePoolNodeConfigFastSocket
- Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig NodePoolNodeConfigGcfsConfig
- GCFS configuration for this node.
- guestAccelerators List<NodePoolNodeConfigGuestAccelerator>
- List of the type and count of accelerator cards attached to the instance.
- gvnic NodePoolNodeConfigGvnic
- Enable or disable gvnic in the node pool.
- hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
- The maintenance policy for the hosts on which the GKE VMs run on.
- imageType String
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig NodePoolNodeConfigKubeletConfig
- Node kubelet configs.
- labels Map<String,String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- Parameters for raw-block local NVMe SSDs.
- localSsdCount Integer
- The number of local SSD disks to be attached to the node.
- localSsdEncryptionMode String
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- loggingVariant String
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType String
- The name of a Google Compute Engine machine type.
- maxRunDuration String
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- metadata Map<String,String>
- The metadata key/value pairs assigned to instances in the cluster.
- minCpuPlatform String
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- nodeGroup String
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauthScopes List<String>
- The set of Google API scopes to be made available on all of the node VMs.
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances.
- reservationAffinity NodePoolNodeConfigReservationAffinity
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- resourceLabels Map<String,String>
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags Map<String,String>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig NodePoolNodeConfigSandboxConfig
- Sandbox configuration for this node.
- secondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk>
- Secondary boot disks for preloading data or container images.
- serviceAccount String
- The Google Cloud Platform Service Account to be used by the node VMs.
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Shielded Instance options.
- soleTenantConfig NodePoolNodeConfigSoleTenantConfig
- Node affinity options for sole tenant node pools.
- spot Boolean
- Whether the nodes are created as spot VM instances.
- storagePools List<String>
- The list of Storage Pools where boot disks are provisioned.
- tags List<String>
- The list of instance tags applied to all nodes.
- taints List<NodePoolNodeConfigTaint>
- List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- The workload metadata configuration for this node.
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- Specifies options for controlling advanced machine features.
- bootDiskKmsKey string
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidentialNodes NodePoolNodeConfigConfidentialNodes
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- containerdConfig NodePoolNodeConfigContainerdConfig
- Parameters for containerd configuration.
- diskSizeGb number
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType string
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effectiveTaints NodePoolNodeConfigEffectiveTaint[]
- List of kubernetes taints applied to each node.
- enableConfidentialStorage boolean
- If enabled boot disks are configured with confidential mode.
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fastSocket NodePoolNodeConfigFastSocket
- Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig NodePoolNodeConfigGcfsConfig
- GCFS configuration for this node.
- guestAccelerators NodePoolNodeConfigGuestAccelerator[]
- List of the type and count of accelerator cards attached to the instance.
- gvnic NodePoolNodeConfigGvnic
- Enable or disable gvnic in the node pool.
- hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
- The maintenance policy for the hosts on which the GKE VMs run on.
- imageType string
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig NodePoolNodeConfigKubeletConfig
- Node kubelet configs.
- labels {[key: string]: string}
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- Parameters for raw-block local NVMe SSDs.
- localSsdCount number
- The number of local SSD disks to be attached to the node.
- localSsdEncryptionMode string
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- loggingVariant string
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType string
- The name of a Google Compute Engine machine type.
- maxRunDuration string
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- metadata {[key: string]: string}
- The metadata key/value pairs assigned to instances in the cluster.
- minCpuPlatform string
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- nodeGroup string
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauthScopes string[]
- The set of Google API scopes to be made available on all of the node VMs.
- preemptible boolean
- Whether the nodes are created as preemptible VM instances.
- reservationAffinity NodePoolNodeConfigReservationAffinity
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- resourceLabels {[key: string]: string}
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags {[key: string]: string}
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig NodePoolNodeConfigSandboxConfig
- Sandbox configuration for this node.
- secondaryBootDisks NodePoolNodeConfigSecondaryBootDisk[]
- Secondary boot disks for preloading data or container images.
- serviceAccount string
- The Google Cloud Platform Service Account to be used by the node VMs.
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Shielded Instance options.
- soleTenantConfig NodePoolNodeConfigSoleTenantConfig
- Node affinity options for sole tenant node pools.
- spot boolean
- Whether the nodes are created as spot VM instances.
- storagePools string[]
- The list of Storage Pools where boot disks are provisioned.
- tags string[]
- The list of instance tags applied to all nodes.
- taints NodePoolNodeConfigTaint[]
- List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- The workload metadata configuration for this node.
- advanced_machine_ Nodefeatures Pool Node Config Advanced Machine Features 
- Specifies options for controlling advanced machine features.
- boot_disk_ strkms_ key 
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidential_nodes NodePool Node Config Confidential Nodes 
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- containerd_config NodePool Node Config Containerd Config 
- Parameters for containerd configuration.
- disk_size_ intgb 
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk_type str
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effective_taints Sequence[NodePool Node Config Effective Taint] 
- List of kubernetes taints applied to each node.
- enable_confidential_ boolstorage 
- If enabled boot disks are configured with confidential mode.
- ephemeral_storage_ Nodeconfig Pool Node Config Ephemeral Storage Config 
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeral_storage_ Nodelocal_ ssd_ config Pool Node Config Ephemeral Storage Local Ssd Config 
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fast_socket NodePool Node Config Fast Socket 
- Enable or disable NCCL Fast Socket in the node pool.
- gcfs_config NodePool Node Config Gcfs Config 
- GCFS configuration for this node.
- guest_accelerators Sequence[NodePool Node Config Guest Accelerator] 
- List of the type and count of accelerator cards attached to the instance.
- gvnic
NodePool Node Config Gvnic 
- Enable or disable gvnic in the node pool.
- host_maintenance_ Nodepolicy Pool Node Config Host Maintenance Policy 
- The maintenance policy for the hosts on which the GKE VMs run on.
- image_type str
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet_config NodePool Node Config Kubelet Config 
- Node kubelet configs.
- labels Mapping[str, str]
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linux_node_ Nodeconfig Pool Node Config Linux Node Config 
- Parameters that can be configured on Linux nodes.
- local_nvme_ Nodessd_ block_ config Pool Node Config Local Nvme Ssd Block Config 
- Parameters for raw-block local NVMe SSDs.
- local_ssd_ intcount 
- The number of local SSD disks to be attached to the node.
- local_ssd_ strencryption_ mode 
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- logging_variant str
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine_type str
- The name of a Google Compute Engine machine type.
- max_run_duration str
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- metadata Mapping[str, str]
- The metadata key/value pairs assigned to instances in the cluster.
- min_cpu_platform str
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- node_group str
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauth_scopes Sequence[str]
- The set of Google API scopes to be made available on all of the node VMs.
- preemptible bool
- Whether the nodes are created as preemptible VM instances.
- reservation_affinity NodePool Node Config Reservation Affinity 
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- resource_labels Mapping[str, str]
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resource_manager_tags Mapping[str, str]
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox_config NodePool Node Config Sandbox Config 
- Sandbox configuration for this node.
- secondary_boot_disks Sequence[NodePoolNodeConfigSecondaryBootDisk]
- Secondary boot disks for preloading data or container images.
- service_account str
- The Google Cloud Platform Service Account to be used by the node VMs.
- shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig
- Shielded Instance options.
- sole_tenant_config NodePoolNodeConfigSoleTenantConfig
- Node affinity options for sole tenant node pools.
- spot bool
- Whether the nodes are created as spot VM instances.
- storage_pools Sequence[str]
- The list of Storage Pools where boot disks are provisioned.
- tags Sequence[str]
- The list of instance tags applied to all nodes.
- taints Sequence[NodePoolNodeConfigTaint]
- List of Kubernetes taints to be applied to each node.
- workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig
- The workload metadata configuration for this node.
- advancedMachineFeatures Property Map
- Specifies options for controlling advanced machine features.
- bootDiskKmsKey String
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidentialNodes Property Map
- Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
- containerdConfig Property Map
- Parameters for containerd configuration.
- diskSizeGb Number
- Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType String
- Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effectiveTaints List<Property Map>
- List of kubernetes taints applied to each node.
- enableConfidentialStorage Boolean
- If enabled boot disks are configured with confidential mode.
- ephemeralStorageConfig Property Map
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeralStorageLocalSsdConfig Property Map
- Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fastSocket Property Map
- Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig Property Map
- GCFS configuration for this node.
- guestAccelerators List<Property Map>
- List of the type and count of accelerator cards attached to the instance.
- gvnic Property Map
- Enable or disable gvnic in the node pool.
- hostMaintenancePolicy Property Map
- The maintenance policy for the hosts on which the GKE VMs run on.
- imageType String
- The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig Property Map
- Node kubelet configs.
- labels Map<String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig Property Map
- Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig Property Map
- Parameters for raw-block local NVMe SSDs.
- localSsdCount Number
- The number of local SSD disks to be attached to the node.
- localSsdEncryptionMode String
- LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
- loggingVariant String
- Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType String
- The name of a Google Compute Engine machine type.
- maxRunDuration String
- The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s".
- metadata Map<String>
- The metadata key/value pairs assigned to instances in the cluster.
- minCpuPlatform String
- Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- nodeGroup String
- Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauthScopes List<String>
- The set of Google API scopes to be made available on all of the node VMs.
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances.
- reservationAffinity Property Map
- The configuration of the desired reservation which instances could take capacity from. Structure is documented below. The autoscaling block supports (either total or per zone limits are required):
- resourceLabels Map<String>
- The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags Map<String>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig Property Map
- Sandbox configuration for this node.
- secondaryBootDisks List<Property Map>
- Secondary boot disks for preloading data or container images.
- serviceAccount String
- The Google Cloud Platform Service Account to be used by the node VMs.
- shieldedInstanceConfig Property Map
- Shielded Instance options.
- soleTenantConfig Property Map
- Node affinity options for sole tenant node pools.
- spot Boolean
- Whether the nodes are created as spot VM instances.
- storagePools List<String>
- The list of Storage Pools where boot disks are provisioned.
- tags List<String>
- The list of instance tags applied to all nodes.
- taints List<Property Map>
- List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig Property Map
- The workload metadata configuration for this node.
NodePoolNodeConfigAdvancedMachineFeatures, NodePoolNodeConfigAdvancedMachineFeaturesArgs              
- ThreadsPerCore int
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- EnableNestedVirtualization bool
- Whether the node should have nested virtualization enabled.
- ThreadsPerCore int
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- EnableNestedVirtualization bool
- Whether the node should have nested virtualization enabled.
- threadsPerCore Integer
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enableNestedVirtualization Boolean
- Whether the node should have nested virtualization enabled.
- threadsPerCore number
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enableNestedVirtualization boolean
- Whether the node should have nested virtualization enabled.
- threads_per_core int
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enable_nested_virtualization bool
- Whether the node should have nested virtualization enabled.
- threadsPerCore Number
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enableNestedVirtualization Boolean
- Whether the node should have nested virtualization enabled.
NodePoolNodeConfigConfidentialNodes, NodePoolNodeConfigConfidentialNodesArgs            
- Enabled bool
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
- Enabled bool
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
- enabled Boolean
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
- enabled boolean
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
- enabled bool
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
- enabled Boolean
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
NodePoolNodeConfigContainerdConfig, NodePoolNodeConfigContainerdConfigArgs            
- PrivateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
- PrivateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
- privateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
- privateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
- private_registry_access_config NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
- privateRegistryAccessConfig Property Map
- Parameters for private container registries configuration.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs                    
- Enabled bool
- Whether or not private registries are configured.
- CertificateAuthorityDomainConfigs List<NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig>
- Parameters for configuring CA certificate and domains.
- Enabled bool
- Whether or not private registries are configured.
- CertificateAuthorityDomainConfigs []NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig
- Parameters for configuring CA certificate and domains.
- enabled Boolean
- Whether or not private registries are configured.
- certificateAuthorityDomainConfigs List<NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig>
- Parameters for configuring CA certificate and domains.
- enabled boolean
- Whether or not private registries are configured.
- certificateAuthorityDomainConfigs NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]
- Parameters for configuring CA certificate and domains.
- enabled bool
- Whether or not private registries are configured.
- certificate_authority_domain_configs Sequence[NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig]
- Parameters for configuring CA certificate and domains.
- enabled Boolean
- Whether or not private registries are configured.
- certificateAuthorityDomainConfigs List<Property Map>
- Parameters for configuring CA certificate and domains.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs                            
- Fqdns List<string>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- GcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
- Parameters for configuring a certificate hosted in GCP SecretManager.
- Fqdns []string
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- GcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
- Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns List<String>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
- Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns string[]
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
- Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns Sequence[str]
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcp_secret_manager_certificate_config NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
- Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns List<String>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcpSecretManagerCertificateConfig Property Map
- Parameters for configuring a certificate hosted in GCP SecretManager.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs                                      
- SecretUri string
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- SecretUri string
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secretUri String
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secretUri string
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secret_uri str
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secretUri String
- URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
NodePoolNodeConfigEffectiveTaint, NodePoolNodeConfigEffectiveTaintArgs            
NodePoolNodeConfigEphemeralStorageConfig, NodePoolNodeConfigEphemeralStorageConfigArgs              
- LocalSsdCount int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- LocalSsdCount int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount Integer
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount number
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local_ssd_count int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount Number
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
NodePoolNodeConfigEphemeralStorageLocalSsdConfig, NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs                  
- LocalSsdCount int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- LocalSsdCount int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount Integer
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount number
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local_ssd_count int
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- localSsdCount Number
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
NodePoolNodeConfigFastSocket, NodePoolNodeConfigFastSocketArgs            
- Enabled bool
- Whether or not NCCL Fast Socket is enabled
- Enabled bool
- Whether or not NCCL Fast Socket is enabled
- enabled Boolean
- Whether or not NCCL Fast Socket is enabled
- enabled boolean
- Whether or not NCCL Fast Socket is enabled
- enabled bool
- Whether or not NCCL Fast Socket is enabled
- enabled Boolean
- Whether or not NCCL Fast Socket is enabled
NodePoolNodeConfigGcfsConfig, NodePoolNodeConfigGcfsConfigArgs            
- Enabled bool
- Whether or not GCFS is enabled
- Enabled bool
- Whether or not GCFS is enabled
- enabled Boolean
- Whether or not GCFS is enabled
- enabled boolean
- Whether or not GCFS is enabled
- enabled bool
- Whether or not GCFS is enabled
- enabled Boolean
- Whether or not GCFS is enabled
NodePoolNodeConfigGuestAccelerator, NodePoolNodeConfigGuestAcceleratorArgs            
- Count int
- The number of the accelerator cards exposed to an instance.
- Type string
- The accelerator type resource name.
- GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
- Configuration for auto installation of GPU driver.
- GpuPartitionSize string
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Configuration for GPU sharing.
- Count int
- The number of the accelerator cards exposed to an instance.
- Type string
- The accelerator type resource name.
- GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
- Configuration for auto installation of GPU driver.
- GpuPartitionSize string
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Configuration for GPU sharing.
- count Integer
- The number of the accelerator cards exposed to an instance.
- type String
- The accelerator type resource name.
- gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
- Configuration for auto installation of GPU driver.
- gpuPartitionSize String
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Configuration for GPU sharing.
- count number
- The number of the accelerator cards exposed to an instance.
- type string
- The accelerator type resource name.
- gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
- Configuration for auto installation of GPU driver.
- gpuPartitionSize string
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Configuration for GPU sharing.
- count int
- The number of the accelerator cards exposed to an instance.
- type str
- The accelerator type resource name.
- gpu_driver_installation_config NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
- Configuration for auto installation of GPU driver.
- gpu_partition_size str
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpu_sharing_config NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Configuration for GPU sharing.
- count Number
- The number of the accelerator cards exposed to an instance.
- type String
- The accelerator type resource name.
- gpuDriverInstallationConfig Property Map
- Configuration for auto installation of GPU driver.
- gpuPartitionSize String
- Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpuSharingConfig Property Map
- Configuration for GPU sharing.
NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig, NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs                    
- GpuDriverVersion string
- Mode for how the GPU driver is installed.
- GpuDriverVersion string
- Mode for how the GPU driver is installed.
- gpuDriverVersion String
- Mode for how the GPU driver is installed.
- gpuDriverVersion string
- Mode for how the GPU driver is installed.
- gpu_driver_version str
- Mode for how the GPU driver is installed.
- gpuDriverVersion String
- Mode for how the GPU driver is installed.
NodePoolNodeConfigGuestAcceleratorGpuSharingConfig, NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs                  
- GpuSharingStrategy string
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- MaxSharedClientsPerGpu int
- The maximum number of containers that can share a GPU.
- GpuSharingStrategy string
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- MaxSharedClientsPerGpu int
- The maximum number of containers that can share a GPU.
- gpuSharingStrategy String
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- maxSharedClientsPerGpu Integer
- The maximum number of containers that can share a GPU.
- gpuSharingStrategy string
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- maxSharedClientsPerGpu number
- The maximum number of containers that can share a GPU.
- gpu_sharing_strategy str
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- max_shared_clients_per_gpu int
- The maximum number of containers that can share a GPU.
- gpuSharingStrategy String
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- maxSharedClientsPerGpu Number
- The maximum number of containers that can share a GPU.
NodePoolNodeConfigGvnic, NodePoolNodeConfigGvnicArgs          
- Enabled bool
- Whether or not gvnic is enabled
- Enabled bool
- Whether or not gvnic is enabled
- enabled Boolean
- Whether or not gvnic is enabled
- enabled boolean
- Whether or not gvnic is enabled
- enabled bool
- Whether or not gvnic is enabled
- enabled Boolean
- Whether or not gvnic is enabled
NodePoolNodeConfigHostMaintenancePolicy, NodePoolNodeConfigHostMaintenancePolicyArgs              
- MaintenanceInterval string
- .
- MaintenanceInterval string
- .
- maintenanceInterval String
- .
- maintenanceInterval string
- .
- maintenance_interval str
- .
- maintenanceInterval String
- .
NodePoolNodeConfigKubeletConfig, NodePoolNodeConfigKubeletConfigArgs            
- AllowedUnsafeSysctls List<string>
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- ContainerLogMaxFiles int
- Defines the maximum number of container log files that can be present for a container.
- ContainerLogMaxSize string
- Defines the maximum size of the container log file before it is rotated.
- CpuCfsQuota bool
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- CpuCfsQuotaPeriod string
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- CpuManagerPolicy string
- Control the CPU management policy on the node.
- ImageGcHighThresholdPercent int
- Defines the percent of disk usage after which image garbage collection is always run.
- ImageGcLowThresholdPercent int
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- ImageMaximumGcAge string
- Defines the maximum age an image can be unused before it is garbage collected.
- ImageMinimumGcAge string
- Defines the minimum age for an unused image before it is garbage collected.
- InsecureKubeletReadonlyPortEnabled string
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values:TRUE,FALSE.
- PodPidsLimit int
- Controls the maximum number of processes allowed to run in a pod.
- AllowedUnsafeSysctls []string
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- ContainerLogMaxFiles int
- Defines the maximum number of container log files that can be present for a container.
- ContainerLogMaxSize string
- Defines the maximum size of the container log file before it is rotated.
- CpuCfsQuota bool
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- CpuCfsQuotaPeriod string
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- CpuManagerPolicy string
- Control the CPU management policy on the node.
- ImageGcHighThresholdPercent int
- Defines the percent of disk usage after which image garbage collection is always run.
- ImageGcLowThresholdPercent int
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- ImageMaximumGcAge string
- Defines the maximum age an image can be unused before it is garbage collected.
- ImageMinimumGcAge string
- Defines the minimum age for an unused image before it is garbage collected.
- InsecureKubeletReadonlyPortEnabled string
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values:TRUE,FALSE.
- PodPidsLimit int
- Controls the maximum number of processes allowed to run in a pod.
- allowedUnsafeSysctls List<String>
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- containerLogMaxFiles Integer
- Defines the maximum number of container log files that can be present for a container.
- containerLogMaxSize String
- Defines the maximum size of the container log file before it is rotated.
- cpuCfsQuota Boolean
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpuCfsQuotaPeriod String
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- cpuManagerPolicy String
- Control the CPU management policy on the node.
- imageGcHighThresholdPercent Integer
- Defines the percent of disk usage after which image garbage collection is always run.
- imageGcLowThresholdPercent Integer
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- imageMaximumGcAge String
- Defines the maximum age an image can be unused before it is garbage collected.
- imageMinimumGcAge String
- Defines the minimum age for an unused image before it is garbage collected.
- insecureKubeletReadonlyPortEnabled String
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values:TRUE,FALSE.
- podPidsLimit Integer
- Controls the maximum number of processes allowed to run in a pod.
- allowedUnsafeSysctls string[]
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- containerLogMaxFiles number
- Defines the maximum number of container log files that can be present for a container.
- containerLogMaxSize string
- Defines the maximum size of the container log file before it is rotated.
- cpuCfsQuota boolean
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpuCfsQuotaPeriod string
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- cpuManagerPolicy string
- Control the CPU management policy on the node.
- imageGcHighThresholdPercent number
- Defines the percent of disk usage after which image garbage collection is always run.
- imageGcLowThresholdPercent number
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- imageMaximumGcAge string
- Defines the maximum age an image can be unused before it is garbage collected.
- imageMinimumGcAge string
- Defines the minimum age for an unused image before it is garbage collected.
- insecureKubeletReadonlyPortEnabled string
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values:TRUE,FALSE.
- podPidsLimit number
- Controls the maximum number of processes allowed to run in a pod.
- allowed_unsafe_sysctls Sequence[str]
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- container_log_max_files int
- Defines the maximum number of container log files that can be present for a container.
- container_log_max_size str
- Defines the maximum size of the container log file before it is rotated.
- cpu_cfs_quota bool
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpu_cfs_quota_period str
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- cpu_manager_policy str
- Control the CPU management policy on the node.
- image_gc_high_threshold_percent int
- Defines the percent of disk usage after which image garbage collection is always run.
- image_gc_low_threshold_percent int
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- image_maximum_gc_age str
- Defines the maximum age an image can be unused before it is garbage collected.
- image_minimum_gc_age str
- Defines the minimum age for an unused image before it is garbage collected.
- insecure_kubelet_readonly_port_enabled str
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values:TRUE,FALSE.
- pod_pids_limit int
- Controls the maximum number of processes allowed to run in a pod.
- allowedUnsafeSysctls List<String>
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- containerLogMaxFiles Number
- Defines the maximum number of container log files that can be present for a container.
- containerLogMaxSize String
- Defines the maximum size of the container log file before it is rotated.
- cpuCfsQuota Boolean
- Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpuCfsQuotaPeriod String
- Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- cpuManagerPolicy String
- Control the CPU management policy on the node.
- imageGcHighThresholdPercent Number
- Defines the percent of disk usage after which image garbage collection is always run.
- imageGcLowThresholdPercent Number
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
- imageMaximumGcAge String
- Defines the maximum age an image can be unused before it is garbage collected.
- imageMinimumGcAge String
- Defines the minimum age for an unused image before it is garbage collected.
- insecureKubeletReadonlyPortEnabled String
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
- podPidsLimit Number
- Controls the maximum number of processes allowed to run in a pod.
NodePoolNodeConfigLinuxNodeConfig, NodePoolNodeConfigLinuxNodeConfigArgs              
- CgroupMode string
- cgroupMode specifies the cgroup mode to be used on the node.
- HugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- Sysctls Dictionary<string, string>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- CgroupMode string
- cgroupMode specifies the cgroup mode to be used on the node.
- HugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- Sysctls map[string]string
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode String
- cgroupMode specifies the cgroup mode to be used on the node.
- hugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- sysctls Map<String,String>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode string
- cgroupMode specifies the cgroup mode to be used on the node.
- hugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- sysctls {[key: string]: string}
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroup_mode str
- cgroupMode specifies the cgroup mode to be used on the node.
- hugepages_config NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- sysctls Mapping[str, str]
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode String
- cgroupMode specifies the cgroup mode to be used on the node.
- hugepagesConfig Property Map
- Amounts for 2M and 1G hugepages.
- sysctls Map<String>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
NodePoolNodeConfigLinuxNodeConfigHugepagesConfig, NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs                  
- HugepageSize1g int
- Amount of 1G hugepages.
- HugepageSize2m int
- Amount of 2M hugepages.
- HugepageSize1g int
- Amount of 1G hugepages.
- HugepageSize2m int
- Amount of 2M hugepages.
- hugepageSize1g Integer
- Amount of 1G hugepages.
- hugepageSize2m Integer
- Amount of 2M hugepages.
- hugepageSize1g number
- Amount of 1G hugepages.
- hugepageSize2m number
- Amount of 2M hugepages.
- hugepage_size1g int
- Amount of 1G hugepages.
- hugepage_size2m int
- Amount of 2M hugepages.
- hugepageSize1g Number
- Amount of 1G hugepages.
- hugepageSize2m Number
- Amount of 2M hugepages.
NodePoolNodeConfigLocalNvmeSsdBlockConfig, NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs                  
- LocalSsdCount int
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- LocalSsdCount int
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount Integer
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount number
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- local_ssd_count int
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount Number
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
NodePoolNodeConfigReservationAffinity, NodePoolNodeConfigReservationAffinityArgs            
- ConsumeReservationType string
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- Key string
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- Values List<string>
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
- ConsumeReservationType string
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- Key string
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- Values []string
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
- consumeReservationType String
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- key String
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- values List<String>
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
- consumeReservationType string
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- key string
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- values string[]
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
- consume_reservation_type str
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- key str
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- values Sequence[str]
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
- consumeReservationType String
- The type of reservation consumption
Accepted values are:
- "UNSPECIFIED": Default value. This should not be used.
- "NO_RESERVATION": Do not consume from any reserved capacity.
- "ANY_RESERVATION": Consume any reservation available.
- "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
 
- key String
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
- values List<String>
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
NodePoolNodeConfigSandboxConfig, NodePoolNodeConfigSandboxConfigArgs            
- SandboxType string
- Type of the sandbox to use for the node (e.g. 'gvisor')
- SandboxType string
- Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType String
- Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType string
- Type of the sandbox to use for the node (e.g. 'gvisor')
- sandbox_type str
- Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType String
- Type of the sandbox to use for the node (e.g. 'gvisor')
NodePoolNodeConfigSecondaryBootDisk, NodePoolNodeConfigSecondaryBootDiskArgs              
- disk_image str
- Disk image to create the secondary boot disk from
- mode str
- Mode for how the secondary boot disk is used.
NodePoolNodeConfigShieldedInstanceConfig, NodePoolNodeConfigShieldedInstanceConfigArgs              
- EnableIntegrityMonitoring bool
- Defines whether the instance has integrity monitoring enabled.
- EnableSecureBoot bool
- Defines whether the instance has Secure Boot enabled.
- EnableIntegrityMonitoring bool
- Defines whether the instance has integrity monitoring enabled.
- EnableSecureBoot bool
- Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring Boolean
- Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot Boolean
- Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring boolean
- Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot boolean
- Defines whether the instance has Secure Boot enabled.
- enable_integrity_monitoring bool
- Defines whether the instance has integrity monitoring enabled.
- enable_secure_boot bool
- Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring Boolean
- Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot Boolean
- Defines whether the instance has Secure Boot enabled.
NodePoolNodeConfigSoleTenantConfig, NodePoolNodeConfigSoleTenantConfigArgs              
NodePoolNodeConfigSoleTenantConfigNodeAffinity, NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs                  
NodePoolNodeConfigTaint, NodePoolNodeConfigTaintArgs          
NodePoolNodeConfigWorkloadMetadataConfig, NodePoolNodeConfigWorkloadMetadataConfigArgs              
- Mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- Mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode String
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode str
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode String
- Mode is the configuration for how to expose metadata to workloads running on the node.
NodePoolPlacementPolicy, NodePoolPlacementPolicyArgs        
- Type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- PolicyName string
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- TpuTopology string
- The TPU placement topology for pod slice node pool.
- Type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- PolicyName string
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- TpuTopology string
- The TPU placement topology for pod slice node pool.
- type String
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- policyName String
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- tpuTopology String
- The TPU placement topology for pod slice node pool.
- type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- policyName string
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- tpuTopology string
- The TPU placement topology for pod slice node pool.
- type str
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- policy_name str
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- tpu_topology str
- The TPU placement topology for pod slice node pool.
- type String
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
- policyName String
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
- tpuTopology String
- The TPU placement topology for pod slice node pool.
NodePoolQueuedProvisioning, NodePoolQueuedProvisioningArgs        
- Enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- Enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled Boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled Boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
NodePoolUpgradeSettings, NodePoolUpgradeSettingsArgs        
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue green upgrades. Structure is documented below
- MaxSurge int
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string
- The upgrade strategy to be used for upgrading the nodes.
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue green upgrades. Structure is documented below
- MaxSurge int
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string
- The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue green upgrades. Structure is documented below
- maxSurge Integer
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Integer
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String
- The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue green upgrades. Structure is documented below
- maxSurge number
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable number
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy string
- The upgrade strategy to be used for upgrading the nodes.
- blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue green upgrades. Structure is documented below
- max_surge int
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- max_unavailable int
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy str
- The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings Property Map
- The settings to adjust blue green upgrades. Structure is documented below
- maxSurge Number
- The number of additional nodes that can be added to the node pool during
an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Number
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String
- The upgrade strategy to be used for upgrading the nodes.
NodePoolUpgradeSettingsBlueGreenSettings, NodePoolUpgradeSettingsBlueGreenSettingsArgs              
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration string
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- Specifies the standard policy settings for blue-green upgrades.
- node_pool_soak_duration str
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy Property Map
- Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy, NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs                    
- BatchNodeCount int
- Number of blue nodes to drain in a batch.
- BatchPercentage double
- Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string
- Soak time after each batch gets drained.
- BatchNodeCount int
- Number of blue nodes to drain in a batch.
- BatchPercentage float64
- Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string
- Soak time after each batch gets drained.
- batchNodeCount Integer
- Number of blue nodes to drain in a batch.
- batchPercentage Double
- Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String
- Soak time after each batch gets drained.
- batchNodeCount number
- Number of blue nodes to drain in a batch.
- batchPercentage number
- Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration string
- Soak time after each batch gets drained.
- batch_node_count int
- Number of blue nodes to drain in a batch.
- batch_percentage float
- Percentage of the blue pool nodes to drain in a batch.
- batch_soak_duration str
- Soak time after each batch gets drained.
- batchNodeCount Number
- Number of blue nodes to drain in a batch.
- batchPercentage Number
- Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String
- Soak time after each batch gets drained.
Import
Node pools can be imported using the project, location, cluster and name. If
the project is omitted, the project value in the provider configuration will be used. Examples:
- {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
- {{location}}/{{cluster_id}}/{{pool_id}}
When using the pulumi import command, node pools can be imported using one of the formats above. For example:
$ pulumi import gcp:container/nodePool:NodePool default {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
$ pulumi import gcp:container/nodePool:NodePool default {{location}}/{{cluster_id}}/{{pool_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.