azurermDataFactoryFlowletDataFlow

Manages a Flowlet Data Flow inside an Azure Data Factory.

Example Usage

/*Provider bindings are generated by running cdktf get.
See https://cdk.tf/provider-generation for more details.*/
import * as azurerm from "./.gen/providers/azurerm";
/*The following providers are missing schema information and might need manual adjustments to synthesize correctly: azurerm.
For a more precise conversion please use the --provider flag in convert.*/
const azurermDataFactoryFlowletDataFlowExample1 =
  new azurerm.dataFactoryFlowletDataFlow.DataFactoryFlowletDataFlow(
    this,
    "example1",
    {
      data_factory_id: "${azurerm_data_factory.test.id}",
      name: "example",
      script:
        "source(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  limit: 100, \n  ignoreNoFilesFound: false, \n  documentForm: 'documentPerLine') ~> source1 \nsource1 sink(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  skipDuplicateMapInputs: true, \n  skipDuplicateMapOutputs: true) ~> sink1\n",
      sink: [
        {
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "sink1",
        },
      ],
      source: [
        {
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "source1",
        },
      ],
    }
  );
const azurermDataFactoryFlowletDataFlowExample2 =
  new azurerm.dataFactoryFlowletDataFlow.DataFactoryFlowletDataFlow(
    this,
    "example2",
    {
      data_factory_id: "${azurerm_data_factory.test.id}",
      name: "example",
      script:
        "source(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  limit: 100, \n  ignoreNoFilesFound: false, \n  documentForm: 'documentPerLine') ~> source1 \nsource1 sink(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  skipDuplicateMapInputs: true, \n  skipDuplicateMapOutputs: true) ~> sink1\n",
      sink: [
        {
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "sink1",
        },
      ],
      source: [
        {
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "source1",
        },
      ],
    }
  );
const azurermResourceGroupExample = new azurerm.resourceGroup.ResourceGroup(
  this,
  "example",
  {
    location: "West Europe",
    name: "example-resources",
  }
);
const azurermStorageAccountExample = new azurerm.storageAccount.StorageAccount(
  this,
  "example_3",
  {
    account_replication_type: "LRS",
    account_tier: "Standard",
    location: azurermResourceGroupExample.location,
    name: "example",
    resource_group_name: azurermResourceGroupExample.name,
  }
);
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermStorageAccountExample.overrideLogicalId("example");
const azurermDataFactoryExample = new azurerm.dataFactory.DataFactory(
  this,
  "example_4",
  {
    location: azurermResourceGroupExample.location,
    name: "example",
    resource_group_name: azurermResourceGroupExample.name,
  }
);
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermDataFactoryExample.overrideLogicalId("example");
const azurermDataFactoryFlowletDataFlowExample =
  new azurerm.dataFactoryFlowletDataFlow.DataFactoryFlowletDataFlow(
    this,
    "example_5",
    {
      data_factory_id: azurermDataFactoryExample.id,
      name: "example",
      script:
        "source(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  limit: 100, \n  ignoreNoFilesFound: false, \n  documentForm: 'documentPerLine') ~> source1 \nsource1 sink(\n  allowSchemaDrift: true, \n  validateSchema: false, \n  skipDuplicateMapInputs: true, \n  skipDuplicateMapOutputs: true) ~> sink1\n",
      sink: [
        {
          flowlet: [
            {
              name: azurermDataFactoryFlowletDataFlowExample2.name,
            },
          ],
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "sink1",
        },
      ],
      source: [
        {
          flowlet: [
            {
              name: azurermDataFactoryFlowletDataFlowExample1.name,
            },
          ],
          linked_service: [
            {
              name: "${azurerm_data_factory_linked_custom_service.test.name}",
            },
          ],
          name: "source1",
        },
      ],
    }
  );
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermDataFactoryFlowletDataFlowExample.overrideLogicalId("example");
const azurermDataFactoryLinkedCustomServiceExample =
  new azurerm.dataFactoryLinkedCustomService.DataFactoryLinkedCustomService(
    this,
    "example_6",
    {
      data_factory_id: azurermDataFactoryExample.id,
      name: "linked_service",
      type: "AzureBlobStorage",
      type_properties_json: `{
  "connectionString": "\${${azurermStorageAccountExample.primaryConnectionString}}"
}
`,
    }
  );
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermDataFactoryLinkedCustomServiceExample.overrideLogicalId("example");
const azurermDataFactoryDatasetJsonExample1 =
  new azurerm.dataFactoryDatasetJson.DataFactoryDatasetJson(
    this,
    "example1_7",
    {
      azure_blob_storage_location: [
        {
          container: "container",
          filename: "foo.txt",
          path: "foo/bar/",
        },
      ],
      data_factory_id: azurermDataFactoryExample.id,
      encoding: "UTF-8",
      linked_service_name: azurermDataFactoryLinkedCustomServiceExample.name,
      name: "dataset1",
    }
  );
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermDataFactoryDatasetJsonExample1.overrideLogicalId("example1");
const azurermDataFactoryDatasetJsonExample2 =
  new azurerm.dataFactoryDatasetJson.DataFactoryDatasetJson(
    this,
    "example2_8",
    {
      azure_blob_storage_location: [
        {
          container: "container",
          filename: "bar.txt",
          path: "foo/bar/",
        },
      ],
      data_factory_id: azurermDataFactoryExample.id,
      encoding: "UTF-8",
      linked_service_name: azurermDataFactoryLinkedCustomServiceExample.name,
      name: "dataset2",
    }
  );
/*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/
azurermDataFactoryDatasetJsonExample2.overrideLogicalId("example2");

Argument Reference

The following arguments are supported:

  • annotations - (Optional) List of tags that can be used for describing the Data Factory Flowlet Data Flow.

  • dataFactoryId - (Required) The ID of the Data Factory with which to associate the Data Flow. Changing this forces a new resource to be created.

  • name - (Required) Specifies the name of the Data Factory Flowlet Data Flow. Changing this forces a new resource to be created.

  • description - (Optional) The description for the Data Factory Flowlet Data Flow.

  • folder - (Optional) The folder that this Data Flow is in. If not specified, the Data Flow will appear at the root level.

  • source - (Required) One or more source blocks as defined below.

  • sink - (Required) One or more sink blocks as defined below.

  • script - (Optional) The script for the Data Factory Flowlet Data Flow.

  • scriptLines - (Optional) The script lines for the Data Factory Flowlet Data Flow, supplied as a list of lines rather than a single script string (see the sketch after this list).

  • transformation - (Optional) One or more transformation blocks as defined below.
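
As an alternative to the single script string shown in Example Usage, the same mapping data flow script can be supplied line by line via scriptLines. The following is a minimal sketch in the same convert-generated style, reusing the azurermDataFactoryExample and azurermDataFactoryLinkedCustomServiceExample resources from the example above; the logical id "script_lines" and the resource name are illustrative.

const azurermDataFactoryFlowletDataFlowScriptLines =
  new azurerm.dataFactoryFlowletDataFlow.DataFactoryFlowletDataFlow(
    this,
    "script_lines",
    {
      data_factory_id: azurermDataFactoryExample.id,
      name: "example-script-lines",
      /*Each element of script_lines is one line of the mapping data flow
      script, in place of a single embedded script string.*/
      script_lines: [
        "source(allowSchemaDrift: true,",
        "  validateSchema: false,",
        "  limit: 100,",
        "  ignoreNoFilesFound: false,",
        "  documentForm: 'documentPerLine') ~> source1",
        "source1 sink(allowSchemaDrift: true,",
        "  validateSchema: false) ~> sink1",
      ],
      sink: [
        {
          linked_service: [
            {
              name: azurermDataFactoryLinkedCustomServiceExample.name,
            },
          ],
          name: "sink1",
        },
      ],
      source: [
        {
          linked_service: [
            {
              name: azurermDataFactoryLinkedCustomServiceExample.name,
            },
          ],
          name: "source1",
        },
      ],
    }
  );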


A dataset block supports the following:

  • name - (Required) The name for the Data Factory Dataset.

  • parameters - (Optional) A map of parameters to associate with the Data Factory dataset.


A flowlet block supports the following:

  • name - (Required) The name for the Data Factory Flowlet.

  • datasetParameters - (Optional) Specifies the reference data flow parameters from the dataset.

  • parameters - (Optional) A map of parameters to associate with the Data Factory Flowlet.
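
For example, a sink that delegates to the second Flowlet from Example Usage and passes a parameter through might look like the following fragment of a resource configuration; the Key1 parameter is illustrative and must match a parameter declared on the referenced Flowlet.

sink: [
  {
    flowlet: [
      {
        name: azurermDataFactoryFlowletDataFlowExample2.name,
        /*Illustrative parameter map; keys must match the parameters
        declared on the referenced Flowlet.*/
        parameters: {
          Key1: "value1",
        },
      },
    ],
    linked_service: [
      {
        name: azurermDataFactoryLinkedCustomServiceExample.name,
      },
    ],
    name: "sink1",
  },
],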


A linkedService block supports the following:

  • name - (Required) The name for the Data Factory Linked Service.

  • parameters - (Optional) A map of parameters to associate with the Data Factory Linked Service.


A source block supports the following:

  • description - (Optional) The description for the Data Flow Source.

  • dataset - (Optional) A dataset block as defined below.

  • flowlet - (Optional) A flowlet block as defined below.

  • linkedService - (Optional) A linkedService block as defined below.

  • name - (Required) The name for the Data Flow Source.

  • rejectedLinkedService - (Optional) A rejectedLinkedService block as defined below.

  • schemaLinkedService - (Optional) A schemaLinkedService block as defined below.
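
Example Usage defines two JSON datasets (dataset1 and dataset2) but wires its endpoints to a linked service. A source can instead reference a dataset directly; a minimal sketch of such a source block, reusing dataset1 from the example:

source: [
  {
    dataset: [
      {
        /*References the dataset1 azurerm_data_factory_dataset_json
        resource defined in Example Usage.*/
        name: azurermDataFactoryDatasetJsonExample1.name,
      },
    ],
    name: "source1",
  },
],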


A sink block supports the following:

  • description - (Optional) The description for the Data Flow Sink.

  • dataset - (Optional) A dataset block as defined below.

  • flowlet - (Optional) A flowlet block as defined below.

  • linkedService - (Optional) A linkedService block as defined below.

  • name - (Required) The name for the Data Flow Sink.

  • rejectedLinkedService - (Optional) A rejectedLinkedService block as defined below.

  • schemaLinkedService - (Optional) A schemaLinkedService block as defined below.


A rejectedLinkedService block supports the following:

  • name - (Required) The name for the Data Factory Linked Service.

  • parameters - (Optional) A map of parameters to associate with the Data Factory Linked Service.


A schemaLinkedService block supports the following:

  • name - (Required) The name for the Data Factory Linked Service with schema.

  • parameters - (Optional) A map of parameters to associate with the Data Factory Linked Service.
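
A sink that resolves its schema through one linked service and routes rejected rows through another might be sketched as follows. Both references reuse the linked custom service from Example Usage purely for illustration; in practice the schema and rejected-row services would usually be distinct.

sink: [
  {
    linked_service: [
      {
        name: azurermDataFactoryLinkedCustomServiceExample.name,
      },
    ],
    schema_linked_service: [
      {
        name: azurermDataFactoryLinkedCustomServiceExample.name,
      },
    ],
    rejected_linked_service: [
      {
        name: azurermDataFactoryLinkedCustomServiceExample.name,
      },
    ],
    name: "sink1",
  },
],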


A transformation block supports the following:

  • name - (Required) The name for the Data Flow transformation.

  • description - (Optional) The description for the Data Flow transformation.

  • dataset - (Optional) A dataset block as defined below.

  • flowlet - (Optional) A flowlet block as defined below.

  • linkedService - (Optional) A linkedService block as defined below.
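
A transformation block labels an intermediate step of the data flow script, so its name must match a stream defined in script or scriptLines. A sketch, assuming the script defines a filter1 transformation:

transformation: [
  {
    name: "filter1",
    description: "Filters out rows that fail validation",
  },
],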

Attributes Reference

The following attributes are exported:

  • id - The ID of the Data Factory Flowlet Data Flow.

Timeouts

The timeouts block allows you to specify timeouts for certain actions:

  • create - (Defaults to 30 minutes) Used when creating the Data Factory Flowlet Data Flow.
  • update - (Defaults to 30 minutes) Used when updating the Data Factory Flowlet Data Flow.
  • read - (Defaults to 5 minutes) Used when retrieving the Data Factory Flowlet Data Flow.
  • delete - (Defaults to 30 minutes) Used when deleting the Data Factory Flowlet Data Flow.
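
In the CDKTF form used above, these map onto a timeouts property on the resource configuration, with durations given as Terraform duration strings. A sketch restating the defaults:

timeouts: {
  create: "30m",
  update: "30m",
  read: "5m",
  delete: "30m",
},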

Import

Data Factory Flowlet Data Flows can be imported using the resource id, e.g.

terraform import azurerm_data_factory_flowlet_data_flow.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.DataFactory/factories/example/dataflows/example