File: azure_databricks_linked_service.py

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .linked_service import LinkedService


class AzureDatabricksLinkedService(LinkedService):
    """Azure Databricks linked service.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param connect_via: The integration runtime reference.
    :type connect_via:
     ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
    :param description: Linked service description.
    :type description: str
    :param parameters: Parameters for the linked service.
    :type parameters: dict[str,
     ~azure.mgmt.datafactory.models.ParameterSpecification]
    :param annotations: List of tags that can be used for describing the
     linked service.
    :type annotations: list[object]
    :param type: Constant filled by server.
    :type type: str
    :param domain: <REGION>.azuredatabricks.net, domain name of your
     Databricks deployment. Type: string (or Expression with resultType
     string).
    :type domain: object
    :param access_token: Access token for the Databricks REST API. Refer to
     https://docs.azuredatabricks.net/api/latest/authentication.html. Type:
     string (or Expression with resultType string).
    :type access_token: ~azure.mgmt.datafactory.models.SecretBase
    :param existing_cluster_id: The id of an existing cluster that will be
     used for all runs of this job. Type: string (or Expression with resultType
     string).
    :type existing_cluster_id: object
    :param new_cluster_version: The Spark version of the new cluster. Type:
     string (or Expression with resultType string).
    :type new_cluster_version: object
    :param new_cluster_num_of_worker: Number of worker nodes the new cluster
     should have. A string-formatted Int32: '1' means numOfWorker is 1, and
     '1:10' means auto-scale from a minimum of 1 to a maximum of 10 workers.
     Type: string (or Expression with resultType string).
    :type new_cluster_num_of_worker: object
    :param new_cluster_node_type: The node type of the new cluster. Type:
     string (or Expression with resultType string).
    :type new_cluster_node_type: object
    :param new_cluster_spark_conf: A set of optional, user-specified Spark
     configuration key-value pairs.
    :type new_cluster_spark_conf: dict[str, object]
    :param encrypted_credential: The encrypted credential used for
     authentication. Credentials are encrypted using the integration runtime
     credential manager. Type: string (or Expression with resultType string).
    :type encrypted_credential: object
    """

    _validation = {
        'type': {'required': True},
        'domain': {'required': True},
        'access_token': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
        'description': {'key': 'description', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
        'annotations': {'key': 'annotations', 'type': '[object]'},
        'type': {'key': 'type', 'type': 'str'},
        'domain': {'key': 'typeProperties.domain', 'type': 'object'},
        'access_token': {'key': 'typeProperties.accessToken', 'type': 'SecretBase'},
        'existing_cluster_id': {'key': 'typeProperties.existingClusterId', 'type': 'object'},
        'new_cluster_version': {'key': 'typeProperties.newClusterVersion', 'type': 'object'},
        'new_cluster_num_of_worker': {'key': 'typeProperties.newClusterNumOfWorker', 'type': 'object'},
        'new_cluster_node_type': {'key': 'typeProperties.newClusterNodeType', 'type': 'object'},
        'new_cluster_spark_conf': {'key': 'typeProperties.newClusterSparkConf', 'type': '{object}'},
        'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
    }
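    # Note: the dotted keys above (e.g. 'typeProperties.domain') instruct the
    # msrest serializer to nest those attributes under "typeProperties" in the
    # JSON payload, so a serialized instance looks roughly like (sketch):
    #   {"type": "AzureDatabricks",
    #    "typeProperties": {"domain": ..., "accessToken": {...}, ...}}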

    def __init__(self, domain, access_token, additional_properties=None,
                 connect_via=None, description=None, parameters=None,
                 annotations=None, existing_cluster_id=None,
                 new_cluster_version=None, new_cluster_num_of_worker=None,
                 new_cluster_node_type=None, new_cluster_spark_conf=None,
                 encrypted_credential=None):
        super(AzureDatabricksLinkedService, self).__init__(
            additional_properties=additional_properties,
            connect_via=connect_via, description=description,
            parameters=parameters, annotations=annotations)
        self.domain = domain
        self.access_token = access_token
        self.existing_cluster_id = existing_cluster_id
        self.new_cluster_version = new_cluster_version
        self.new_cluster_num_of_worker = new_cluster_num_of_worker
        self.new_cluster_node_type = new_cluster_node_type
        self.new_cluster_spark_conf = new_cluster_spark_conf
        self.encrypted_credential = encrypted_credential
        self.type = 'AzureDatabricks'
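

# Usage sketch (illustrative; not part of the generated model). SecureString
# is the SecretBase subclass exported from azure.mgmt.datafactory.models; the
# domain, token, and cluster values below are placeholders, not real settings.
def _example_azure_databricks_linked_service():
    from azure.mgmt.datafactory.models import SecureString

    return AzureDatabricksLinkedService(
        # <region>.azuredatabricks.net domain of the Databricks workspace
        domain='https://westeurope.azuredatabricks.net',
        # Personal access token, wrapped in a SecretBase implementation
        access_token=SecureString(value='<databricks-personal-access-token>'),
        # Settings for a new job cluster: Spark version, node type, and
        # auto-scaling from 1 to 4 workers (the 'min:max' string format)
        new_cluster_version='4.0',
        new_cluster_num_of_worker='1:4',
        new_cluster_node_type='Standard_D3_v2',
    )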