gcp.dataflow.Pipeline
The main pipeline entity and all the necessary metadata for launching and managing linked jobs.
To get more information about Pipeline, see:
- API documentation
- How-to Guides
Example Usage
Data Pipeline Pipeline
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const serviceAccount = new gcp.serviceaccount.Account("service_account", {
    accountId: "my-account",
    displayName: "Service Account",
});
const primary = new gcp.dataflow.Pipeline("primary", {
    name: "my-pipeline",
    displayName: "my-pipeline",
    type: "PIPELINE_TYPE_BATCH",
    state: "STATE_ACTIVE",
    region: "us-central1",
    workload: {
        dataflowLaunchTemplateRequest: {
            projectId: "my-project",
            gcsPath: "gs://my-bucket/path",
            launchParameters: {
                jobName: "my-job",
                parameters: {
                    name: "wrench",
                },
                environment: {
                    numWorkers: 5,
                    maxWorkers: 5,
                    zone: "us-centra1-a",
                    serviceAccountEmail: serviceAccount.email,
                    network: "default",
                    tempLocation: "gs://my-bucket/tmp_dir",
                    bypassTempDirValidation: false,
                    machineType: "E2",
                    additionalUserLabels: {
                        context: "test",
                    },
                    workerRegion: "us-central1",
                    workerZone: "us-central1-a",
                    enableStreamingEngine: false,
                },
                update: false,
                transformNameMapping: {
                    name: "wrench",
                },
            },
            location: "us-central1",
        },
    },
    scheduleInfo: {
        schedule: "* */2 * * *",
    },
});
import pulumi
import pulumi_gcp as gcp
service_account = gcp.serviceaccount.Account("service_account",
    account_id="my-account",
    display_name="Service Account")
primary = gcp.dataflow.Pipeline("primary",
    name="my-pipeline",
    display_name="my-pipeline",
    type="PIPELINE_TYPE_BATCH",
    state="STATE_ACTIVE",
    region="us-central1",
    workload={
        "dataflow_launch_template_request": {
            "project_id": "my-project",
            "gcs_path": "gs://my-bucket/path",
            "launch_parameters": {
                "job_name": "my-job",
                "parameters": {
                    "name": "wrench",
                },
                "environment": {
                    "num_workers": 5,
                    "max_workers": 5,
                    "zone": "us-centra1-a",
                    "service_account_email": service_account.email,
                    "network": "default",
                    "temp_location": "gs://my-bucket/tmp_dir",
                    "bypass_temp_dir_validation": False,
                    "machine_type": "E2",
                    "additional_user_labels": {
                        "context": "test",
                    },
                    "worker_region": "us-central1",
                    "worker_zone": "us-central1-a",
                    "enable_streaming_engine": False,
                },
                "update": False,
                "transform_name_mapping": {
                    "name": "wrench",
                },
            },
            "location": "us-central1",
        },
    },
    schedule_info={
        "schedule": "* */2 * * *",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataflow"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		serviceAccount, err := serviceaccount.NewAccount(ctx, "service_account", &serviceaccount.AccountArgs{
			AccountId:   pulumi.String("my-account"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewPipeline(ctx, "primary", &dataflow.PipelineArgs{
			Name:        pulumi.String("my-pipeline"),
			DisplayName: pulumi.String("my-pipeline"),
			Type:        pulumi.String("PIPELINE_TYPE_BATCH"),
			State:       pulumi.String("STATE_ACTIVE"),
			Region:      pulumi.String("us-central1"),
			Workload: &dataflow.PipelineWorkloadArgs{
				DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
					ProjectId: pulumi.String("my-project"),
					GcsPath:   pulumi.String("gs://my-bucket/path"),
					LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
						JobName: pulumi.String("my-job"),
						Parameters: pulumi.StringMap{
							"name": pulumi.String("wrench"),
						},
						Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
							NumWorkers:              pulumi.Int(5),
							MaxWorkers:              pulumi.Int(5),
							Zone:                    pulumi.String("us-central1-a"),
							ServiceAccountEmail:     serviceAccount.Email,
							Network:                 pulumi.String("default"),
							TempLocation:            pulumi.String("gs://my-bucket/tmp_dir"),
							BypassTempDirValidation: pulumi.Bool(false),
							MachineType:             pulumi.String("E2"),
							AdditionalUserLabels: pulumi.StringMap{
								"context": pulumi.String("test"),
							},
							WorkerRegion:          pulumi.String("us-central1"),
							WorkerZone:            pulumi.String("us-central1-a"),
							EnableStreamingEngine: pulumi.Bool(false),
						},
						Update: pulumi.Bool(false),
						TransformNameMapping: pulumi.StringMap{
							"name": pulumi.String("wrench"),
						},
					},
					Location: pulumi.String("us-central1"),
				},
			},
			ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
				Schedule: pulumi.String("* */2 * * *"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var serviceAccount = new Gcp.ServiceAccount.Account("service_account", new()
    {
        AccountId = "my-account",
        DisplayName = "Service Account",
    });
    var primary = new Gcp.Dataflow.Pipeline("primary", new()
    {
        Name = "my-pipeline",
        DisplayName = "my-pipeline",
        Type = "PIPELINE_TYPE_BATCH",
        State = "STATE_ACTIVE",
        Region = "us-central1",
        Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
        {
            DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
            {
                ProjectId = "my-project",
                GcsPath = "gs://my-bucket/path",
                LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
                {
                    JobName = "my-job",
                    Parameters = 
                    {
                        { "name", "wrench" },
                    },
                    Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
                    {
                        NumWorkers = 5,
                        MaxWorkers = 5,
                        Zone = "us-centra1-a",
                        ServiceAccountEmail = serviceAccount.Email,
                        Network = "default",
                        TempLocation = "gs://my-bucket/tmp_dir",
                        BypassTempDirValidation = false,
                        MachineType = "E2",
                        AdditionalUserLabels = 
                        {
                            { "context", "test" },
                        },
                        WorkerRegion = "us-central1",
                        WorkerZone = "us-central1-a",
                        EnableStreamingEngine = false,
                    },
                    Update = false,
                    TransformNameMapping = 
                    {
                        { "name", "wrench" },
                    },
                },
                Location = "us-central1",
            },
        },
        ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
        {
            Schedule = "* */2 * * *",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.dataflow.Pipeline;
import com.pulumi.gcp.dataflow.PipelineArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineScheduleInfoArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var serviceAccount = new Account("serviceAccount", AccountArgs.builder()
            .accountId("my-account")
            .displayName("Service Account")
            .build());
        var primary = new Pipeline("primary", PipelineArgs.builder()
            .name("my-pipeline")
            .displayName("my-pipeline")
            .type("PIPELINE_TYPE_BATCH")
            .state("STATE_ACTIVE")
            .region("us-central1")
            .workload(PipelineWorkloadArgs.builder()
                .dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
                    .projectId("my-project")
                    .gcsPath("gs://my-bucket/path")
                    .launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
                        .jobName("my-job")
                        .parameters(Map.of("name", "wrench"))
                        .environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
                            .numWorkers(5)
                            .maxWorkers(5)
                            .zone("us-centra1-a")
                            .serviceAccountEmail(serviceAccount.email())
                            .network("default")
                            .tempLocation("gs://my-bucket/tmp_dir")
                            .bypassTempDirValidation(false)
                            .machineType("E2")
                            .additionalUserLabels(Map.of("context", "test"))
                            .workerRegion("us-central1")
                            .workerZone("us-central1-a")
                            .enableStreamingEngine("false")
                            .build())
                        .update(false)
                        .transformNameMapping(Map.of("name", "wrench"))
                        .build())
                    .location("us-central1")
                    .build())
                .build())
            .scheduleInfo(PipelineScheduleInfoArgs.builder()
                .schedule("* */2 * * *")
                .build())
            .build());
    }
}
resources:
  serviceAccount:
    type: gcp:serviceaccount:Account
    name: service_account
    properties:
      accountId: my-account
      displayName: Service Account
  primary:
    type: gcp:dataflow:Pipeline
    properties:
      name: my-pipeline
      displayName: my-pipeline
      type: PIPELINE_TYPE_BATCH
      state: STATE_ACTIVE
      region: us-central1
      workload:
        dataflowLaunchTemplateRequest:
          projectId: my-project
          gcsPath: gs://my-bucket/path
          launchParameters:
            jobName: my-job
            parameters:
              name: wrench
            environment:
              numWorkers: 5
              maxWorkers: 5
              zone: us-central1-a
              serviceAccountEmail: ${serviceAccount.email}
              network: default
              tempLocation: gs://my-bucket/tmp_dir
              bypassTempDirValidation: false
              machineType: E2
              additionalUserLabels:
                context: test
              workerRegion: us-central1
              workerZone: us-central1-a
              enableStreamingEngine: false
            update: false
            transformNameMapping:
              name: wrench
          location: us-central1
      scheduleInfo:
        schedule: '* */2 * * *'
Create Pipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Pipeline(name: string, args: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
             args: PipelineArgs,
             opts: Optional[ResourceOptions] = None)
@overload
def Pipeline(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             state: Optional[str] = None,
             type: Optional[str] = None,
             display_name: Optional[str] = None,
             name: Optional[str] = None,
             pipeline_sources: Optional[Mapping[str, str]] = None,
             project: Optional[str] = None,
             region: Optional[str] = None,
             schedule_info: Optional[PipelineScheduleInfoArgs] = None,
             scheduler_service_account_email: Optional[str] = None,
             workload: Optional[PipelineWorkloadArgs] = None)
func NewPipeline(ctx *Context, name string, args PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs args, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: gcp:dataflow:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var pipelineResource = new Gcp.Dataflow.Pipeline("pipelineResource", new()
{
    State = "string",
    Type = "string",
    DisplayName = "string",
    Name = "string",
    PipelineSources = 
    {
        { "string", "string" },
    },
    Project = "string",
    Region = "string",
    ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
    {
        NextJobTime = "string",
        Schedule = "string",
        TimeZone = "string",
    },
    SchedulerServiceAccountEmail = "string",
    Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
    {
        DataflowFlexTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestArgs
        {
            LaunchParameter = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs
            {
                JobName = "string",
                ContainerSpecGcsPath = "string",
                Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs
                {
                    AdditionalExperiments = new[]
                    {
                        "string",
                    },
                    AdditionalUserLabels = 
                    {
                        { "string", "string" },
                    },
                    EnableStreamingEngine = false,
                    FlexrsGoal = "string",
                    IpConfiguration = "string",
                    KmsKeyName = "string",
                    MachineType = "string",
                    MaxWorkers = 0,
                    Network = "string",
                    NumWorkers = 0,
                    ServiceAccountEmail = "string",
                    Subnetwork = "string",
                    TempLocation = "string",
                    WorkerRegion = "string",
                    WorkerZone = "string",
                    Zone = "string",
                },
                LaunchOptions = 
                {
                    { "string", "string" },
                },
                Parameters = 
                {
                    { "string", "string" },
                },
                TransformNameMappings = 
                {
                    { "string", "string" },
                },
                Update = false,
            },
            Location = "string",
            ProjectId = "string",
            ValidateOnly = false,
        },
        DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
        {
            ProjectId = "string",
            GcsPath = "string",
            LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
            {
                JobName = "string",
                Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
                {
                    AdditionalExperiments = new[]
                    {
                        "string",
                    },
                    AdditionalUserLabels = 
                    {
                        { "string", "string" },
                    },
                    BypassTempDirValidation = false,
                    EnableStreamingEngine = false,
                    IpConfiguration = "string",
                    KmsKeyName = "string",
                    MachineType = "string",
                    MaxWorkers = 0,
                    Network = "string",
                    NumWorkers = 0,
                    ServiceAccountEmail = "string",
                    Subnetwork = "string",
                    TempLocation = "string",
                    WorkerRegion = "string",
                    WorkerZone = "string",
                    Zone = "string",
                },
                Parameters = 
                {
                    { "string", "string" },
                },
                TransformNameMapping = 
                {
                    { "string", "string" },
                },
                Update = false,
            },
            Location = "string",
            ValidateOnly = false,
        },
    },
});
example, err := dataflow.NewPipeline(ctx, "pipelineResource", &dataflow.PipelineArgs{
	State:       pulumi.String("string"),
	Type:        pulumi.String("string"),
	DisplayName: pulumi.String("string"),
	Name:        pulumi.String("string"),
	PipelineSources: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Project: pulumi.String("string"),
	Region:  pulumi.String("string"),
	ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
		NextJobTime: pulumi.String("string"),
		Schedule:    pulumi.String("string"),
		TimeZone:    pulumi.String("string"),
	},
	SchedulerServiceAccountEmail: pulumi.String("string"),
	Workload: &dataflow.PipelineWorkloadArgs{
		DataflowFlexTemplateRequest: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestArgs{
			LaunchParameter: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs{
				JobName:              pulumi.String("string"),
				ContainerSpecGcsPath: pulumi.String("string"),
				Environment: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs{
					AdditionalExperiments: pulumi.StringArray{
						pulumi.String("string"),
					},
					AdditionalUserLabels: pulumi.StringMap{
						"string": pulumi.String("string"),
					},
					EnableStreamingEngine: pulumi.Bool(false),
					FlexrsGoal:            pulumi.String("string"),
					IpConfiguration:       pulumi.String("string"),
					KmsKeyName:            pulumi.String("string"),
					MachineType:           pulumi.String("string"),
					MaxWorkers:            pulumi.Int(0),
					Network:               pulumi.String("string"),
					NumWorkers:            pulumi.Int(0),
					ServiceAccountEmail:   pulumi.String("string"),
					Subnetwork:            pulumi.String("string"),
					TempLocation:          pulumi.String("string"),
					WorkerRegion:          pulumi.String("string"),
					WorkerZone:            pulumi.String("string"),
					Zone:                  pulumi.String("string"),
				},
				LaunchOptions: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Parameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				TransformNameMappings: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Update: pulumi.Bool(false),
			},
			Location:     pulumi.String("string"),
			ProjectId:    pulumi.String("string"),
			ValidateOnly: pulumi.Bool(false),
		},
		DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
			ProjectId: pulumi.String("string"),
			GcsPath:   pulumi.String("string"),
			LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
				JobName: pulumi.String("string"),
				Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
					AdditionalExperiments: pulumi.StringArray{
						pulumi.String("string"),
					},
					AdditionalUserLabels: pulumi.StringMap{
						"string": pulumi.String("string"),
					},
					BypassTempDirValidation: pulumi.Bool(false),
					EnableStreamingEngine:   pulumi.Bool(false),
					IpConfiguration:         pulumi.String("string"),
					KmsKeyName:              pulumi.String("string"),
					MachineType:             pulumi.String("string"),
					MaxWorkers:              pulumi.Int(0),
					Network:                 pulumi.String("string"),
					NumWorkers:              pulumi.Int(0),
					ServiceAccountEmail:     pulumi.String("string"),
					Subnetwork:              pulumi.String("string"),
					TempLocation:            pulumi.String("string"),
					WorkerRegion:            pulumi.String("string"),
					WorkerZone:              pulumi.String("string"),
					Zone:                    pulumi.String("string"),
				},
				Parameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				TransformNameMapping: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Update: pulumi.Bool(false),
			},
			Location:     pulumi.String("string"),
			ValidateOnly: pulumi.Bool(false),
		},
	},
})
var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
    .state("string")
    .type("string")
    .displayName("string")
    .name("string")
    .pipelineSources(Map.of("string", "string"))
    .project("string")
    .region("string")
    .scheduleInfo(PipelineScheduleInfoArgs.builder()
        .nextJobTime("string")
        .schedule("string")
        .timeZone("string")
        .build())
    .schedulerServiceAccountEmail("string")
    .workload(PipelineWorkloadArgs.builder()
        .dataflowFlexTemplateRequest(PipelineWorkloadDataflowFlexTemplateRequestArgs.builder()
            .launchParameter(PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs.builder()
                .jobName("string")
                .containerSpecGcsPath("string")
                .environment(PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs.builder()
                    .additionalExperiments("string")
                    .additionalUserLabels(Map.of("string", "string"))
                    .enableStreamingEngine(false)
                    .flexrsGoal("string")
                    .ipConfiguration("string")
                    .kmsKeyName("string")
                    .machineType("string")
                    .maxWorkers(0)
                    .network("string")
                    .numWorkers(0)
                    .serviceAccountEmail("string")
                    .subnetwork("string")
                    .tempLocation("string")
                    .workerRegion("string")
                    .workerZone("string")
                    .zone("string")
                    .build())
                .launchOptions(Map.of("string", "string"))
                .parameters(Map.of("string", "string"))
                .transformNameMappings(Map.of("string", "string"))
                .update(false)
                .build())
            .location("string")
            .projectId("string")
            .validateOnly(false)
            .build())
        .dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
            .projectId("string")
            .gcsPath("string")
            .launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
                .jobName("string")
                .environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
                    .additionalExperiments("string")
                    .additionalUserLabels(Map.of("string", "string"))
                    .bypassTempDirValidation(false)
                    .enableStreamingEngine(false)
                    .ipConfiguration("string")
                    .kmsKeyName("string")
                    .machineType("string")
                    .maxWorkers(0)
                    .network("string")
                    .numWorkers(0)
                    .serviceAccountEmail("string")
                    .subnetwork("string")
                    .tempLocation("string")
                    .workerRegion("string")
                    .workerZone("string")
                    .zone("string")
                    .build())
                .parameters(Map.of("string", "string"))
                .transformNameMapping(Map.of("string", "string"))
                .update(false)
                .build())
            .location("string")
            .validateOnly(false)
            .build())
        .build())
    .build());
pipeline_resource = gcp.dataflow.Pipeline("pipelineResource",
    state="string",
    type="string",
    display_name="string",
    name="string",
    pipeline_sources={
        "string": "string",
    },
    project="string",
    region="string",
    schedule_info={
        "next_job_time": "string",
        "schedule": "string",
        "time_zone": "string",
    },
    scheduler_service_account_email="string",
    workload={
        "dataflow_flex_template_request": {
            "launch_parameter": {
                "job_name": "string",
                "container_spec_gcs_path": "string",
                "environment": {
                    "additional_experiments": ["string"],
                    "additional_user_labels": {
                        "string": "string",
                    },
                    "enable_streaming_engine": False,
                    "flexrs_goal": "string",
                    "ip_configuration": "string",
                    "kms_key_name": "string",
                    "machine_type": "string",
                    "max_workers": 0,
                    "network": "string",
                    "num_workers": 0,
                    "service_account_email": "string",
                    "subnetwork": "string",
                    "temp_location": "string",
                    "worker_region": "string",
                    "worker_zone": "string",
                    "zone": "string",
                },
                "launch_options": {
                    "string": "string",
                },
                "parameters": {
                    "string": "string",
                },
                "transform_name_mappings": {
                    "string": "string",
                },
                "update": False,
            },
            "location": "string",
            "project_id": "string",
            "validate_only": False,
        },
        "dataflow_launch_template_request": {
            "project_id": "string",
            "gcs_path": "string",
            "launch_parameters": {
                "job_name": "string",
                "environment": {
                    "additional_experiments": ["string"],
                    "additional_user_labels": {
                        "string": "string",
                    },
                    "bypass_temp_dir_validation": False,
                    "enable_streaming_engine": False,
                    "ip_configuration": "string",
                    "kms_key_name": "string",
                    "machine_type": "string",
                    "max_workers": 0,
                    "network": "string",
                    "num_workers": 0,
                    "service_account_email": "string",
                    "subnetwork": "string",
                    "temp_location": "string",
                    "worker_region": "string",
                    "worker_zone": "string",
                    "zone": "string",
                },
                "parameters": {
                    "string": "string",
                },
                "transform_name_mapping": {
                    "string": "string",
                },
                "update": False,
            },
            "location": "string",
            "validate_only": False,
        },
    })
const pipelineResource = new gcp.dataflow.Pipeline("pipelineResource", {
    state: "string",
    type: "string",
    displayName: "string",
    name: "string",
    pipelineSources: {
        string: "string",
    },
    project: "string",
    region: "string",
    scheduleInfo: {
        nextJobTime: "string",
        schedule: "string",
        timeZone: "string",
    },
    schedulerServiceAccountEmail: "string",
    workload: {
        dataflowFlexTemplateRequest: {
            launchParameter: {
                jobName: "string",
                containerSpecGcsPath: "string",
                environment: {
                    additionalExperiments: ["string"],
                    additionalUserLabels: {
                        string: "string",
                    },
                    enableStreamingEngine: false,
                    flexrsGoal: "string",
                    ipConfiguration: "string",
                    kmsKeyName: "string",
                    machineType: "string",
                    maxWorkers: 0,
                    network: "string",
                    numWorkers: 0,
                    serviceAccountEmail: "string",
                    subnetwork: "string",
                    tempLocation: "string",
                    workerRegion: "string",
                    workerZone: "string",
                    zone: "string",
                },
                launchOptions: {
                    string: "string",
                },
                parameters: {
                    string: "string",
                },
                transformNameMappings: {
                    string: "string",
                },
                update: false,
            },
            location: "string",
            projectId: "string",
            validateOnly: false,
        },
        dataflowLaunchTemplateRequest: {
            projectId: "string",
            gcsPath: "string",
            launchParameters: {
                jobName: "string",
                environment: {
                    additionalExperiments: ["string"],
                    additionalUserLabels: {
                        string: "string",
                    },
                    bypassTempDirValidation: false,
                    enableStreamingEngine: false,
                    ipConfiguration: "string",
                    kmsKeyName: "string",
                    machineType: "string",
                    maxWorkers: 0,
                    network: "string",
                    numWorkers: 0,
                    serviceAccountEmail: "string",
                    subnetwork: "string",
                    tempLocation: "string",
                    workerRegion: "string",
                    workerZone: "string",
                    zone: "string",
                },
                parameters: {
                    string: "string",
                },
                transformNameMapping: {
                    string: "string",
                },
                update: false,
            },
            location: "string",
            validateOnly: false,
        },
    },
});
type: gcp:dataflow:Pipeline
properties:
    displayName: string
    name: string
    pipelineSources:
        string: string
    project: string
    region: string
    scheduleInfo:
        nextJobTime: string
        schedule: string
        timeZone: string
    schedulerServiceAccountEmail: string
    state: string
    type: string
    workload:
        dataflowFlexTemplateRequest:
            launchParameter:
                containerSpecGcsPath: string
                environment:
                    additionalExperiments:
                        - string
                    additionalUserLabels:
                        string: string
                    enableStreamingEngine: false
                    flexrsGoal: string
                    ipConfiguration: string
                    kmsKeyName: string
                    machineType: string
                    maxWorkers: 0
                    network: string
                    numWorkers: 0
                    serviceAccountEmail: string
                    subnetwork: string
                    tempLocation: string
                    workerRegion: string
                    workerZone: string
                    zone: string
                jobName: string
                launchOptions:
                    string: string
                parameters:
                    string: string
                transformNameMappings:
                    string: string
                update: false
            location: string
            projectId: string
            validateOnly: false
        dataflowLaunchTemplateRequest:
            gcsPath: string
            launchParameters:
                environment:
                    additionalExperiments:
                        - string
                    additionalUserLabels:
                        string: string
                    bypassTempDirValidation: false
                    enableStreamingEngine: false
                    ipConfiguration: string
                    kmsKeyName: string
                    machineType: string
                    maxWorkers: 0
                    network: string
                    numWorkers: 0
                    serviceAccountEmail: string
                    subnetwork: string
                    tempLocation: string
                    workerRegion: string
                    workerZone: string
                    zone: string
                jobName: string
                parameters:
                    string: string
                transformNameMapping:
                    string: string
                update: false
            location: string
            projectId: string
            validateOnly: false
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
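For example, the schedule_info object can be written either way. This is a minimal sketch; the resource names and region are illustrative:
import pulumi_gcp as gcp
# Argument-class form
p1 = gcp.dataflow.Pipeline("p1",
    type="PIPELINE_TYPE_BATCH",
    state="STATE_ACTIVE",
    region="us-central1",
    schedule_info=gcp.dataflow.PipelineScheduleInfoArgs(schedule="* */2 * * *"))
# Equivalent dictionary-literal form
p2 = gcp.dataflow.Pipeline("p2",
    type="PIPELINE_TYPE_BATCH",
    state="STATE_ACTIVE",
    region="us-central1",
    schedule_info={"schedule": "* */2 * * *"})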
The Pipeline resource accepts the following input properties:
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- DisplayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- PipelineSources Dictionary<string, string>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- ScheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- SchedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- Workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- DisplayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- PipelineSources map[string]string
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- ScheduleInfo PipelineScheduleInfoArgs
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- SchedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- Workload PipelineWorkloadArgs
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- displayName String
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources Map<String,String>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- scheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail String
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- displayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources {[key: string]: string}
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- A reference to the region
- scheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state str
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- type str
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- display_name str
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name str
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline_sources Mapping[str, str]
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- A reference to the region
- schedule_info PipelineScheduleInfoArgs
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler_service_account_email str
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload PipelineWorkloadArgs
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED, STATE_RESUMING, STATE_ACTIVE, STATE_STOPPING, STATE_ARCHIVED, STATE_PAUSED.
- type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED, PIPELINE_TYPE_BATCH, PIPELINE_TYPE_STREAMING.
- displayName String
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources Map<String>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- scheduleInfo Property Map
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail String
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload Property Map
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
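To see how the scheduling inputs above fit together: schedule_info drives periodic job creation, while the scheduler service account email sets the identity the Cloud Scheduler job runs as. A hedged Python sketch; the service account, cron expression, and time zone are illustrative, not taken from the example above:
import pulumi_gcp as gcp
# Dedicated service account for the Cloud Scheduler job (illustrative name).
scheduler_sa = gcp.serviceaccount.Account("scheduler-sa",
    account_id="pipeline-scheduler",
    display_name="Pipeline scheduler")
scheduled = gcp.dataflow.Pipeline("scheduled",
    type="PIPELINE_TYPE_BATCH",
    state="STATE_ACTIVE",
    region="us-central1",
    schedule_info={
        "schedule": "0 */6 * * *",        # run every six hours
        "time_zone": "America/New_York",  # optional IANA time zone
    },
    scheduler_service_account_email=scheduler_sa.email)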
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- CreateTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Id string
- The provider-assigned unique ID for this managed resource.
- JobCount int
- Number of jobs.
- LastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- CreateTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Id string
- The provider-assigned unique ID for this managed resource.
- JobCount int
- Number of jobs.
- LastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- createTime String
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id String
- The provider-assigned unique ID for this managed resource.
- jobCount Integer
- Number of jobs.
- lastUpdateTime String
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- createTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id string
- The provider-assigned unique ID for this managed resource.
- jobCount number
- Number of jobs.
- lastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- create_time str
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id str
- The provider-assigned unique ID for this managed resource.
- job_count int
- Number of jobs.
- last_update_time str
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- createTime String
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id String
- The provider-assigned unique ID for this managed resource.
- jobCount Number
- Number of jobs.
- lastUpdateTime String
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
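These outputs can be consumed like any other Pulumi outputs. As a minimal TypeScript sketch, assuming a pipeline resource named primary as in the example usage above, the service-populated values can be exported from the stack:

// `primary` refers to a gcp.dataflow.Pipeline resource defined elsewhere in the program.
// These exports surface the values the Data Pipelines service sets after creation.
export const pipelineCreateTime = primary.createTime;
export const pipelineLastUpdateTime = primary.lastUpdateTime;
export const pipelineJobCount = primary.jobCount;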
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        create_time: Optional[str] = None,
        display_name: Optional[str] = None,
        job_count: Optional[int] = None,
        last_update_time: Optional[str] = None,
        name: Optional[str] = None,
        pipeline_sources: Optional[Mapping[str, str]] = None,
        project: Optional[str] = None,
        region: Optional[str] = None,
        schedule_info: Optional[PipelineScheduleInfoArgs] = None,
        scheduler_service_account_email: Optional[str] = None,
        state: Optional[str] = None,
        type: Optional[str] = None,
        workload: Optional[PipelineWorkloadArgs] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:dataflow:Pipeline
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
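As a hedged TypeScript sketch of the lookup described above, the following reads an existing pipeline back by its fully qualified resource ID; the project, region, and pipeline names are placeholders, not values taken from this page:

import * as gcp from "@pulumi/gcp";

// Look up an existing pipeline by its full resource ID
// (projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID).
// The ID below is a hypothetical example.
const existing = gcp.dataflow.Pipeline.get(
    "existing-pipeline",
    "projects/my-project/locations/us-central1/pipelines/my-pipeline",
);

// The looked-up state is exposed through the resource's output properties.
export const existingState = existing.state;
export const existingJobCount = existing.jobCount;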
- CreateTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- DisplayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- JobCount int
- Number of jobs.
- LastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- PipelineSources Dictionary<string, string>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- ScheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- SchedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- Workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- CreateTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- DisplayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- JobCount int
- Number of jobs.
- LastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- PipelineSources map[string]string
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- ScheduleInfo PipelineScheduleInfoArgs
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- SchedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- Workload PipelineWorkloadArgs
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- createTime String
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- displayName String
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- jobCount Integer
- Number of jobs.
- lastUpdateTime String
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources Map<String,String>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- scheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail String
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- createTime string
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- displayName string
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- jobCount number
- Number of jobs.
- lastUpdateTime string
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources {[key: string]: string}
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- A reference to the region
- scheduleInfo PipelineScheduleInfo
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail string
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- create_time str
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- display_name str
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- job_count int
- Number of jobs.
- last_update_time str
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name str
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline_sources Mapping[str, str]
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- A reference to the region
- schedule_info PipelineScheduleInfoArgs
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler_service_account_email str
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state str
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- type str
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- workload PipelineWorkloadArgs
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- createTime String
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- displayName String
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- jobCount Number
- Number of jobs.
- lastUpdateTime String
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources Map<String>
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- scheduleInfo Property Map
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail String
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are: STATE_UNSPECIFIED,STATE_RESUMING,STATE_ACTIVE,STATE_STOPPING,STATE_ARCHIVED,STATE_PAUSED.
- type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are: PIPELINE_TYPE_UNSPECIFIED,PIPELINE_TYPE_BATCH,PIPELINE_TYPE_STREAMING.
- workload Property Map
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
Supporting Types
PipelineScheduleInfo, PipelineScheduleInfoArgs      
- NextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- TimeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- NextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- TimeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime String
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule String
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone String
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- next_job_time str
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule str
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- time_zone str
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime String
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule String
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone String
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
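To make the shape of this block concrete, here is a minimal TypeScript sketch of scheduleInfo on a pipeline; the cron expression, time zone, and scheduler service account are illustrative assumptions, and nextJobTime is output-only so it is not set here:

// A scheduled batch pipeline; only the scheduling-related arguments are shown.
// schedule uses unix-cron syntax and timeZone uses Cloud Scheduler time zone IDs.
const scheduled = new gcp.dataflow.Pipeline("scheduled", {
    type: "PIPELINE_TYPE_BATCH",
    state: "STATE_ACTIVE",
    region: "us-central1",
    scheduleInfo: {
        schedule: "0 */6 * * *",      // every six hours (illustrative)
        timeZone: "America/New_York", // illustrative time zone ID; UTC if omitted
    },
    // Placeholder service account; the default Compute Engine account is used if omitted.
    schedulerServiceAccountEmail: "scheduler-sa@my-project.iam.gserviceaccount.com",
    // workload omitted here for brevity; see the example usage above.
});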
PipelineWorkload, PipelineWorkloadArgs    
- DataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- DataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- DataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- DataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflow_flex_template_request PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflow_launch_template_request PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest Property Map
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest Property Map
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
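The two entries above are alternative ways to describe the job to launch: the example usage at the top of this page uses dataflowLaunchTemplateRequest (a classic template), while a Flex Template pipeline would set dataflowFlexTemplateRequest instead. A hedged TypeScript sketch of the flex variant, with placeholder bucket, project, and parameter names, might look like this:

// Hypothetical Flex Template workload; the gs:// paths, project ID, and
// template parameters are placeholders, not values documented on this page.
const flexPipeline = new gcp.dataflow.Pipeline("flex", {
    type: "PIPELINE_TYPE_STREAMING",
    state: "STATE_ACTIVE",
    region: "us-central1",
    workload: {
        dataflowFlexTemplateRequest: {
            projectId: "my-project",
            location: "us-central1",
            launchParameter: {
                jobName: "my-flex-job",
                containerSpecGcsPath: "gs://my-bucket/templates/spec.json",
                parameters: {
                    inputSubscription: "projects/my-project/subscriptions/my-subscription",
                },
                environment: {
                    tempLocation: "gs://my-bucket/tmp_dir",
                    maxWorkers: 5,
                },
            },
        },
    },
});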
PipelineWorkloadDataflowFlexTemplateRequest, PipelineWorkloadDataflowFlexTemplateRequestArgs            
- LaunchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- Location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- ValidateOnly bool
- If true, the request is validated but not actually executed. Defaults to false.
- LaunchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- Location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- ValidateOnly bool
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location String
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly Boolean
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId string
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly boolean
- If true, the request is validated but not actually executed. Defaults to false.
- launch_parameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location str
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- project_id str
- The ID of the Cloud Platform project that the job belongs to.
- validate_only bool
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter Property Map
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location String
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly Boolean
- If true, the request is validated but not actually executed. Defaults to false.
PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter, PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs                
- JobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- ContainerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- Environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- LaunchOptions Dictionary<string, string>
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Parameters Dictionary<string, string>
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMappings Dictionary<string, string>
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- JobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- ContainerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- Environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- LaunchOptions map[string]string
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Parameters map[string]string
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMappings map[string]string
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName String
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath String
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions Map<String,String>
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Map<String,String>
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings Map<String,String>
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions {[key: string]: string}
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters {[key: string]: string}
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings {[key: string]: string}
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- job_name str
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- container_spec_gcs_path str
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launch_options Mapping[str, str]
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Mapping[str, str]
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transform_name_mappings Mapping[str, str]
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName String
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath String
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment Property Map
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions Map<String>
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Map<String>
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings Map<String>
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
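The update and transformNameMappings fields above are what distinguish a streaming update request from a first launch. As a hedged sketch, a launchParameter block for updating a running streaming Flex Template job (with hypothetical transform names and paths) could look like this in TypeScript:

// Hypothetical launchParameter for updating a running streaming job.
// jobName must match the job that is already running, and the mapping keys
// are old transform names with their replacements as values.
const updateLaunchParameter = {
    jobName: "my-flex-job",
    containerSpecGcsPath: "gs://my-bucket/templates/spec.json",
    update: true,
    transformNameMappings: {
        oldReadTransform: "newReadTransform",
    },
};
// This object would be supplied as workload.dataflowFlexTemplateRequest.launchParameter.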
PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment, PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs                  
- AdditionalExperiments List<string>
- Additional experiment flags for the job.
- AdditionalUserLabels Dictionary<string, string>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- FlexrsGoal string
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED,FLEXRS_SPEED_OPTIMIZED,FLEXRS_COST_OPTIMIZED.
- IpConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED,WORKER_IP_PUBLIC,WORKER_IP_PRIVATE.
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- MaxWorkers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- NumWorkers int
- The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string
- The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- TempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- WorkerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- WorkerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- AdditionalExperiments []string
- Additional experiment flags for the job.
- AdditionalUserLabels map[string]string
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- FlexrsGoal string
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED,FLEXRS_SPEED_OPTIMIZED,FLEXRS_COST_OPTIMIZED.
- IpConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED,WORKER_IP_PUBLIC,WORKER_IP_PRIVATE.
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- MaxWorkers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- NumWorkers int
- The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string
- The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- TempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- WorkerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- WorkerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments List<String>
- Additional experiment flags for the job.
- additionalUserLabels Map<String,String>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enableStreamingEngine Boolean
- Whether to enable Streaming Engine for the job.
- flexrsGoal String
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED,FLEXRS_SPEED_OPTIMIZED,FLEXRS_COST_OPTIMIZED.
- ipConfiguration String
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED,WORKER_IP_PUBLIC,WORKER_IP_PRIVATE.
- kmsKeyName String
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType String
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers Integer
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers Integer
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail String
- The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation String
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion String
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone String
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments string[]
- Additional experiment flags for the job.
- additionalUserLabels {[key: string]: string}
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enableStreamingEngine boolean
- Whether to enable Streaming Engine for the job.
- flexrsGoal string
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED,FLEXRS_SPEED_OPTIMIZED,FLEXRS_COST_OPTIMIZED.
- ipConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED,WORKER_IP_PUBLIC,WORKER_IP_PRIVATE.
- kmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers number
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers number
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail string
- The email address of the service account to run the job as.
- subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional_experiments Sequence[str]
- Additional experiment flags for the job.
- additional_user_labels Mapping[str, str]
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- enable_streaming_engine bool
- Whether to enable Streaming Engine for the job.
- flexrs_goal str
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED, FLEXRS_SPEED_OPTIMIZED, FLEXRS_COST_OPTIMIZED.
- ip_configuration str
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kms_key_name str
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine_type str
- The machine type to use for the job. Defaults to the value from the template if not specified.
- max_workers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network str
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num_workers int
- The initial number of Compute Engine instances for the job.
- service_account_email str
- The email address of the service account to run the job as.
- subnetwork str
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp_location str
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker_region str
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker_zone str
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone str
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments List<String>
- Additional experiment flags for the job.
- additionalUserLabels Map<String>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- enableStreamingEngine Boolean
- Whether to enable Streaming Engine for the job.
- flexrsGoal String
- Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are: FLEXRS_UNSPECIFIED, FLEXRS_SPEED_OPTIMIZED, FLEXRS_COST_OPTIMIZED.
- ipConfiguration String
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName String
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType String
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers Number
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers Number
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail String
- The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation String
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion String
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone String
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
PipelineWorkloadDataflowLaunchTemplateRequest, PipelineWorkloadDataflowLaunchTemplateRequestArgs            
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- GcsPath string
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- LaunchParameters PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- Location string
- The regional endpoint to which to direct the request.
- ValidateOnly bool
- (Optional)
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- GcsPath string
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- LaunchParameters PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- Location string
- The regional endpoint to which to direct the request.
- ValidateOnly bool
- (Optional)
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- gcsPath String
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launchParameters PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location String
- The regional endpoint to which to direct the request.
- validateOnly Boolean
- (Optional)
- projectId string
- The ID of the Cloud Platform project that the job belongs to.
- gcsPath string
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launchParameters PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location string
- The regional endpoint to which to direct the request.
- validateOnly boolean
- (Optional)
- project_id str
- The ID of the Cloud Platform project that the job belongs to.
- gcs_path str
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launch_parameters PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location str
- The regional endpoint to which to direct the request.
- validate_only bool
- (Optional)
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- gcsPath String
- A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launchParameters Property Map
- The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location String
- The regional endpoint to which to direct the request.
- validateOnly Boolean
- (Optional)
PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters, PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs                
- JobName string
- The job name to use for the created job.
- Environment PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- Parameters Dictionary<string, string>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMapping Dictionary<string, string>
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- JobName string
- The job name to use for the created job.
- Environment PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- Parameters map[string]string
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMapping map[string]string
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- jobName String
- The job name to use for the created job.
- environment PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Map<String,String>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping Map<String,String>
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- jobName string
- The job name to use for the created job.
- environment PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters {[key: string]: string}
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping {[key: string]: string}
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- job_name str
- The job name to use for the created job.
- environment PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Mapping[str, str]
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transform_name_mapping Mapping[str, str]
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- jobName String
- The job name to use for the created job.
- environment Property Map
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Map<String>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping Map<String>
- Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
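The update and transformNameMapping fields only take effect when the launch replaces a job that is already running. As a hedged sketch (the project, bucket, topic, and transform prefix names below are illustrative placeholders, not values from this page), an in-place update of a streaming template job might look like:
import * as gcp from "@pulumi/gcp";

// Sketch only: replaces an already-running templated streaming job in place.
const updated = new gcp.dataflow.Pipeline("updated", {
    name: "my-updated-pipeline",
    type: "PIPELINE_TYPE_STREAMING",
    state: "STATE_ACTIVE",
    region: "us-central1",
    workload: {
        dataflowLaunchTemplateRequest: {
            projectId: "my-project",                         // placeholder project
            gcsPath: "gs://my-bucket/templates/my-template", // placeholder template path
            location: "us-central1",
            launchParameters: {
                jobName: "my-job",          // must match the job being replaced
                update: true,               // replace the existing job, preserving state
                transformNameMapping: {     // old transform name prefix -> new prefix
                    ReadInput: "ReadInputV2",
                },
                parameters: {
                    inputTopic: "projects/my-project/topics/my-topic", // placeholder
                },
            },
        },
    },
});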
PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment, PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs                  
- AdditionalExperiments List<string>
- Additional experiment flags for the job.
- AdditionalUserLabels Dictionary<string, string>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- BypassTempDirValidation bool
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- IpConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- MaxWorkers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- NumWorkers int
- The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string
- The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- TempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- WorkerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- WorkerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- AdditionalExperiments []string
- Additional experiment flags for the job.
- AdditionalUserLabels map[string]string
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- BypassTempDirValidation bool
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- IpConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- MaxWorkers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- NumWorkers int
- The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string
- The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- TempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- WorkerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- WorkerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments List<String>
- Additional experiment flags for the job.
- additionalUserLabels Map<String,String>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- bypassTempDirValidation Boolean
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine Boolean
- Whether to enable Streaming Engine for the job.
- ipConfiguration String
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName String
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType String
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers Integer
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers Integer
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail String
- The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation String
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion String
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone String
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments string[]
- Additional experiment flags for the job.
- additionalUserLabels {[key: string]: string}
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- bypassTempDirValidation boolean
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine boolean
- Whether to enable Streaming Engine for the job.
- ipConfiguration string
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers number
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers number
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail string
- The email address of the service account to run the job as.
- subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation string
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion string
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone string
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional_experiments Sequence[str]
- Additional experiment flags for the job.
- additional_user_labels Mapping[str, str]
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- bypass_temp_dir_validation bool
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enable_streaming_engine bool
- Whether to enable Streaming Engine for the job.
- ip_configuration str
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kms_key_name str
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine_type str
- The machine type to use for the job. Defaults to the value from the template if not specified.
- max_workers int
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network str
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num_workers int
- The initial number of Compute Engine instances for the job.
- service_account_email str
- The email address of the service account to run the job as.
- subnetwork str
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp_location str
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker_region str
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker_zone str
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone str
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments List<String>
- Additional experiment flags for the job.
- additionalUserLabels Map<String>
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- bypassTempDirValidation Boolean
- Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine Boolean
- Whether to enable Streaming Engine for the job.
- ipConfiguration String
- Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName String
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType String
- The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers Number
- The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers Number
- The initial number of Compute Engine instances for the job.
- serviceAccountEmail String
- The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation String
- The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion String
- The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone String
- The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
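As a hedged sketch of the environment block just described (every project, subnetwork, key ring, and bucket name below is an illustrative placeholder), a launch that keeps workers on private IPs inside a Shared VPC subnetwork and encrypts with a customer-managed key could be written as:
import * as gcp from "@pulumi/gcp";

// Sketch only: a locked-down runtime environment for a classic template launch.
const lockedDown = new gcp.dataflow.Pipeline("locked_down", {
    name: "locked-down-pipeline",
    type: "PIPELINE_TYPE_BATCH",
    state: "STATE_ACTIVE",
    region: "us-central1",
    workload: {
        dataflowLaunchTemplateRequest: {
            projectId: "my-project",
            gcsPath: "gs://my-bucket/templates/my-template",
            location: "us-central1",
            launchParameters: {
                jobName: "locked-down-job",
                environment: {
                    ipConfiguration: "WORKER_IP_PRIVATE", // no public worker IPs
                    // Shared VPC subnetworks must be given as a full URL.
                    subnetwork: "https://www.googleapis.com/compute/v1/projects/host-project/regions/us-central1/subnetworks/dataflow-subnet",
                    workerRegion: "us-central1",
                    kmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
                    tempLocation: "gs://my-bucket/tmp",
                    maxWorkers: 10,
                },
            },
        },
    },
});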
Import
Pipeline can be imported using any of these accepted formats:
- projects/{{project}}/locations/{{region}}/pipelines/{{name}}
- {{project}}/{{region}}/{{name}}
- {{region}}/{{name}}
- {{name}}
When using the pulumi import command, Pipeline can be imported using one of the formats above. For example:
$ pulumi import gcp:dataflow/pipeline:Pipeline default projects/{{project}}/locations/{{region}}/pipelines/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{project}}/{{region}}/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{region}}/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{name}}
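With hypothetical values (a pipeline named my-pipeline in us-central1 of project my-project), the first format expands to:
$ pulumi import gcp:dataflow/pipeline:Pipeline default projects/my-project/locations/us-central1/pipelines/my-pipeline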
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.