code
stringlengths
54
334k
docs
stringlengths
18
26.2k
def utc_epoch():
    """
    Return the Unix epoch (1970-01-01) as a UTC-aware datetime.

    pendulum's utcnow() is deliberately avoided: it attaches a TimezoneInfo
    object instead of a Timezone, which is not picklable and also creates
    issues when using replace().

    :return: datetime for 1970-01-01 with UTC tzinfo
    """
    epoch = dt.datetime(1970, 1, 1)
    return epoch.replace(tzinfo=utc)
[python] Gets the Unix epoch (1970-01-01) as a UTC-aware datetime :return: datetime with UTC tzinfo
def convert_to_utc(value):
    """
    Return *value* converted to UTC, attaching the default timezone first
    if no timezone information was associated.

    :param value: datetime (naive or aware); falsy values are returned as-is
    :return: datetime with UTC tzinfo
    """
    if not value:
        return value

    # Naive datetimes are assumed to be in the configured default TIMEZONE.
    if not is_localized(value):
        value = pendulum.instance(value, TIMEZONE)

    return value.astimezone(utc)
[python] Returns the datetime with the default timezone added if timezone information was not associated :param value: datetime :return: datetime with tzinfo
def make_aware(value, timezone=None):
    """
    Make a naive datetime.datetime in a given time zone aware.

    :param value: naive datetime
    :param timezone: target timezone; defaults to the configured TIMEZONE
    :return: localized datetime in settings.TIMEZONE or the given timezone
    :raises ValueError: if *value* is already timezone-aware
    """
    tz = TIMEZONE if timezone is None else timezone

    # Refuse to clobber the timezone of an already-aware datetime.
    if is_localized(value):
        raise ValueError(
            "make_aware expects a naive datetime, got %s" % value)

    if hasattr(value, 'fold'):
        # On python 3.6 mirror what pendulum does for python 3.5: when the
        # clock moves back, schedule the run at the *second* occurrence of
        # the repeated wall-clock time rather than the first. fold has no
        # impact outside DST transitions, so setting it to 1 is safe.
        value = value.replace(fold=1)

    if hasattr(tz, 'localize'):
        # pytz time zones expose localize().
        return tz.localize(value)
    if hasattr(tz, 'convert'):
        # pendulum time zones expose convert().
        return tz.convert(value)
    # Plain tzinfo fallback — this may be wrong around DST changes!
    return value.replace(tzinfo=tz)
[python] Make a naive datetime.datetime in a given time zone aware. :param value: datetime :param timezone: timezone :return: localized datetime in settings.TIMEZONE or timezone
def make_naive(value, timezone=None):
    """
    Make an aware datetime.datetime naive in a given time zone.

    :param value: aware datetime
    :param timezone: timezone to express the value in before stripping
        tzinfo; defaults to the configured TIMEZONE
    :return: naive datetime
    :raises ValueError: if *value* is naive
    """
    tz = TIMEZONE if timezone is None else timezone

    # Emulate the behavior of astimezone() on Python < 3.6: reject naive input.
    if is_naive(value):
        raise ValueError("make_naive() cannot be applied to a naive datetime")

    local = value.astimezone(tz)

    # Rebuild a plain datetime field-by-field for cross-library compatibility.
    return dt.datetime(local.year, local.month, local.day,
                       local.hour, local.minute, local.second,
                       local.microsecond)
[python] Make an aware datetime.datetime naive in a given time zone. :param value: datetime :param timezone: timezone :return: naive datetime
def datetime(*args, **kwargs):
    """
    Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo
    was not specified.

    :return: datetime.datetime
    """
    # Only fall back to the default timezone when the caller supplied none;
    # TIMEZONE must not be evaluated otherwise.
    if 'tzinfo' not in kwargs:
        kwargs['tzinfo'] = TIMEZONE

    return dt.datetime(*args, **kwargs)
[python] Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified :return: datetime.datetime
def _set_env_from_extras(self, extras): key_path = self._get_field(extras, 'key_path', False) keyfile_json_str = self._get_field(extras, 'keyfile_dict', False) if not key_path and not keyfile_json_str: self.log.info('Using gcloud with application default credentials.') elif key_path: os.environ[G_APP_CRED] = key_path else: # Write service account JSON to secure file for gcloud to reference service_key = tempfile.NamedTemporaryFile(delete=False) service_key.write(keyfile_json_str) os.environ[G_APP_CRED] = service_key.name # Return file object to have a pointer to close after use, # thus deleting from file system. return service_key
[python] Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either: - The path to the keyfile from the specified connection id - A generated file's path if the user specified JSON in the connection id. The file is assumed to be deleted after the process dies due to how mkstemp() works. The environment variable is used inside the gcloud command to determine correct service account to use.
def _get_field(self, extras, field, default=None): long_f = 'extra__google_cloud_platform__{}'.format(field) if long_f in extras: return extras[long_f] else: self.log.info('Field %s not found in extras.', field) return default
[python] Fetches a field from extras, and returns it. This is some Airflow magic. The google_cloud_platform hook type adds custom UI elements to the hook page, which allow admins to specify service_account, key_path, etc. They get formatted as shown below.
def get_conn(self):
    """Establish a connection to druid broker."""
    conn = self.get_connection(self.druid_broker_conn_id)
    extras = conn.extra_dejson
    druid_broker_conn = connect(
        host=conn.host,
        port=conn.port,
        path=extras.get('endpoint', '/druid/v2/sql'),
        scheme=extras.get('schema', 'http'),
    )
    self.log.info('Get the connection to druid broker on %s', conn.host)
    return druid_broker_conn
[python] Establish a connection to druid broker.
def get_conn(self, headers=None):
    """
    Returns http session for use with requests.

    :param headers: additional headers to be passed through as a dictionary
    :type headers: dict
    """
    session = requests.Session()
    if self.http_conn_id:
        conn = self.get_connection(self.http_conn_id)

        if "://" in conn.host:
            self.base_url = conn.host
        else:
            # schema defaults to HTTP
            schema = conn.schema if conn.schema else "http"
            self.base_url = schema + "://" + conn.host

        if conn.port:
            self.base_url = self.base_url + ":" + str(conn.port)
        if conn.login:
            session.auth = (conn.login, conn.password)
        if conn.extra:
            try:
                session.headers.update(conn.extra_dejson)
            except TypeError:
                # log.warn() is a deprecated alias of log.warning().
                self.log.warning('Connection to %s has invalid extra field.',
                                 conn.host)
    if headers:
        session.headers.update(headers)

    return session
[python] Returns http session for use with requests :param headers: additional headers to be passed through as a dictionary :type headers: dict
def run(self, endpoint, data=None, headers=None, extra_options=None):
    """
    Performs the request.

    :param endpoint: the endpoint to be called i.e. resource/v1/query?
    :type endpoint: str
    :param data: payload to be uploaded or request parameters
    :type data: dict
    :param headers: additional headers to be passed through as a dictionary
    :type headers: dict
    :param extra_options: additional options to be used when executing the
        request i.e. {'check_response': False} to avoid raising exceptions
        on non 2XX or 3XX status codes
    :type extra_options: dict
    """
    extra_options = extra_options or {}
    session = self.get_conn(headers)

    # Join base_url and endpoint, inserting a slash only when neither
    # side already provides one.
    base = self.base_url
    if base and not base.endswith('/') and \
            endpoint and not endpoint.startswith('/'):
        url = base + '/' + endpoint
    else:
        url = (base or '') + (endpoint or '')

    if self.method == 'GET':
        # GET uses params
        req = requests.Request(self.method, url, params=data, headers=headers)
    elif self.method == 'HEAD':
        # HEAD doesn't use params
        req = requests.Request(self.method, url, headers=headers)
    else:
        # Others use data
        req = requests.Request(self.method, url, data=data, headers=headers)

    prepped_request = session.prepare_request(req)
    self.log.info("Sending '%s' to url: %s", self.method, url)
    return self.run_and_check(session, prepped_request, extra_options)
[python] Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict
def check_response(self, response):
    """
    Checks the status code and raise an AirflowException exception on non
    2XX or 3XX status codes.

    :param response: A requests response object
    :type response: requests.response
    """
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        self.log.error("HTTP error: %s", response.reason)
        # The response body is only useful for methods that carry one.
        if self.method not in ('GET', 'HEAD'):
            self.log.error(response.text)
        message = str(response.status_code) + ":" + response.reason
        raise AirflowException(message)
[python] Checks the status code and raise an AirflowException exception on non 2XX or 3XX status codes :param response: A requests response object :type response: requests.response
def run_and_check(self, session, prepped_request, extra_options):
    """
    Grabs extra options like timeout and actually runs the request,
    checking for the result.

    :param session: the session to be used to execute the request
    :type session: requests.Session
    :param prepped_request: the prepared request generated in run()
    :type prepped_request: session.prepare_request
    :param extra_options: additional options to be used when executing the
        request i.e. {'check_response': False} to avoid raising exceptions
        on non 2XX or 3XX status codes
    :type extra_options: dict
    """
    extra_options = extra_options or {}
    try:
        response = session.send(
            prepped_request,
            stream=extra_options.get("stream", False),
            verify=extra_options.get("verify", True),
            proxies=extra_options.get("proxies", {}),
            cert=extra_options.get("cert"),
            timeout=extra_options.get("timeout"),
            allow_redirects=extra_options.get("allow_redirects", True))

        if extra_options.get('check_response', True):
            self.check_response(response)
        return response

    except requests.exceptions.ConnectionError as ex:
        # warn() is a deprecated alias of warning(); also use lazy
        # %-formatting instead of eager string concatenation.
        self.log.warning('%s Tenacity will retry to execute the operation', ex)
        # Bare raise preserves the original traceback.
        raise
[python] Grabs extra options like timeout and actually runs the request, checking for the result :param session: the session to be used to execute the request :type session: requests.Session :param prepped_request: the prepared request generated in run() :type prepped_request: session.prepare_request :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict
def run_with_advanced_retry(self, _retry_args, *args, **kwargs):
    """
    Runs Hook.run() with a Tenacity decorator attached to it. This is useful
    for connectors which might be disturbed by intermittent issues and should
    not instantly fail.

    :param _retry_args: Arguments which define the retry behaviour.
        See Tenacity documentation at https://github.com/jd/tenacity
    :type _retry_args: dict
    :return: the result of Hook.run() (previously silently discarded)

    :Example::

        hook = HttpHook(http_conn_id='my_conn', method='GET')
        retry_args = dict(
            wait=tenacity.wait_exponential(),
            stop=tenacity.stop_after_attempt(10),
            retry=requests.exceptions.ConnectionError
        )
        hook.run_with_advanced_retry(
            endpoint='v1/test',
            _retry_args=retry_args
        )
    """
    self._retry_obj = tenacity.Retrying(**_retry_args)
    # Return the response so callers can actually use the result of the
    # retried run(); the previous implementation dropped it.
    return self._retry_obj(self.run, *args, **kwargs)
[python] Runs Hook.run() with a Tenacity decorator attached to it. This is useful for connectors which might be disturbed by intermittent issues and should not instantly fail. :param _retry_args: Arguments which define the retry behaviour. See Tenacity documentation at https://github.com/jd/tenacity :type _retry_args: dict :Example:: hook = HttpHook(http_conn_id='my_conn',method='GET') retry_args = dict( wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(10), retry=requests.exceptions.ConnectionError ) hook.run_with_advanced_retry( endpoint='v1/test', _retry_args=retry_args )
def create_session():
    """Contextmanager that will create and teardown a session."""
    session = settings.Session()
    try:
        # Hand the session to the caller; commit only if the body succeeded.
        yield session
        session.commit()
    except Exception:
        # Any failure in the managed block rolls the transaction back.
        session.rollback()
        raise
    finally:
        session.close()
[python] Contextmanager that will create and teardown a session.
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function; if not, this
    wrapper will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        func_params = func.__code__.co_varnames
        # The caller supplied a session either positionally...
        try:
            session_in_args = func_params.index('session') < len(args)
        except ValueError:
            session_in_args = False
        # ...or as a keyword argument.
        if session_in_args or 'session' in kwargs:
            return func(*args, **kwargs)
        # Otherwise create (and later tear down) a session ourselves.
        with create_session() as session:
            kwargs['session'] = session
            return func(*args, **kwargs)

    return wrapper
[python] Function decorator that provides a session if it isn't provided. If you want to reuse a session or run the function as part of a database transaction, you pass it to the function, if not this wrapper will create one and close it for you.
def resetdb():
    """Clear out the database."""
    from airflow import models
    # alembic adds significant import time, so we import it lazily
    from alembic.migration import MigrationContext

    log.info("Dropping tables that exist")
    models.base.Base.metadata.drop_all(settings.engine)

    # NOTE(review): _version is a private alembic attribute — verify it still
    # exists when upgrading alembic.
    migration_ctx = MigrationContext.configure(settings.engine)
    if migration_ctx._version.exists(settings.engine):
        migration_ctx._version.drop(settings.engine)

    from flask_appbuilder.models.sqla import Base
    Base.metadata.drop_all(settings.engine)

    initdb()
[python] Clear out the database
def execute(self, context):
    """Upload a file to Azure Blob Storage."""
    hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
    # Bug fix: the original called .format() on a string containing %s
    # placeholders, which is a no-op — the message logged the literal '%s'
    # and dropped the arguments. Pass them lazily to log.info instead.
    self.log.info(
        'Uploading %s to wasb://%s as %s',
        self.file_path, self.container_name, self.blob_name,
    )
    hook.load_file(self.file_path, self.container_name, self.blob_name,
                   **self.load_options)
[python] Upload a file to Azure Blob Storage.
def get_conn(self):
    """Returns a connection object."""
    db = self.get_connection(self.presto_conn_id)

    # Basic auth is only wired up when a password is configured.
    reqkwargs = None
    if db.password is not None:
        reqkwargs = {'auth': HTTPBasicAuth(db.login, db.password)}

    extras = db.extra_dejson
    return presto.connect(
        host=db.host,
        port=db.port,
        username=db.login,
        source=extras.get('source', 'airflow'),
        protocol=extras.get('protocol', 'http'),
        catalog=extras.get('catalog', 'hive'),
        requests_kwargs=reqkwargs,
        schema=db.schema)
[python] Returns a connection object
def _get_pretty_exception_message(e): if (hasattr(e, 'message') and 'errorName' in e.message and 'message' in e.message): return ('{name}: {message}'.format( name=e.message['errorName'], message=e.message['message'])) else: return str(e)
[python] Parses some DatabaseError to provide a better error message
def get_records(self, hql, parameters=None):
    """Get a set of records from Presto."""
    stripped = self._strip_sql(hql)
    try:
        return super().get_records(stripped, parameters)
    except DatabaseError as e:
        # Surface Presto's structured error payload when available.
        raise PrestoException(self._get_pretty_exception_message(e))
[python] Get a set of records from Presto
def get_pandas_df(self, hql, parameters=None):
    """Get a pandas dataframe from a sql query."""
    import pandas

    cursor = self.get_cursor()
    try:
        cursor.execute(self._strip_sql(hql), parameters)
        data = cursor.fetchall()
    except DatabaseError as e:
        raise PrestoException(self._get_pretty_exception_message(e))

    if not data:
        return pandas.DataFrame()
    # Name the columns from the cursor description (first tuple element).
    column_names = [desc[0] for desc in cursor.description]
    return pandas.DataFrame(data, columns=column_names)
[python] Get a pandas dataframe from a sql query.
def run(self, hql, parameters=None):
    """Execute the statement against Presto. Can be used to create views."""
    stripped = self._strip_sql(hql)
    return super().run(stripped, parameters)
[python] Execute the statement against Presto. Can be used to create views.
def insert_rows(self, table, rows, target_fields=None):
    """
    A generic way to insert a set of tuples into a table.

    :param table: Name of the target table
    :type table: str
    :param rows: The rows to insert into the table
    :type rows: iterable of tuples
    :param target_fields: The names of the columns to fill in the table
    :type target_fields: iterable of strings
    """
    # The trailing 0 is passed through to the parent implementation
    # (presumably commit_every — TODO confirm against the base hook).
    super().insert_rows(table, rows, target_fields, 0)
[python] A generic way to insert a set of tuples into a table. :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings
def get_conn(self):
    """Return a cosmos db client, creating and caching it on first use."""
    if self.cosmos_client is None:
        # Initialize the Python Azure Cosmos DB client
        self.cosmos_client = cosmos_client.CosmosClient(
            self.endpoint_uri, {'masterKey': self.master_key})
    return self.cosmos_client
[python] Return a cosmos db client.
def does_collection_exist(self, collection_name, database_name=None):
    """Checks if a collection exists in CosmosDB."""
    if collection_name is None:
        raise AirflowBadRequest("Collection name cannot be None.")

    query = {
        "query": "SELECT * FROM r WHERE r.id=@id",
        "parameters": [
            {"name": "@id", "value": collection_name}
        ]
    }
    matches = list(self.get_conn().QueryContainers(
        get_database_link(self.__get_database_name(database_name)), query))
    return len(matches) > 0
[python] Checks if a collection exists in CosmosDB.
def create_collection(self, collection_name, database_name=None):
    """Creates a new collection in the CosmosDB database."""
    if collection_name is None:
        raise AirflowBadRequest("Collection name cannot be None.")

    # We need to check to see if this container already exists so we
    # don't try to create it twice.
    query = {
        "query": "SELECT * FROM r WHERE r.id=@id",
        "parameters": [
            {"name": "@id", "value": collection_name}
        ]
    }
    database_link = get_database_link(self.__get_database_name(database_name))
    existing = list(self.get_conn().QueryContainers(database_link, query))

    # Only create if we did not find it already existing.
    if not existing:
        self.get_conn().CreateContainer(database_link, {"id": collection_name})
[python] Creates a new collection in the CosmosDB database.
def does_database_exist(self, database_name):
    """Checks if a database exists in CosmosDB."""
    if database_name is None:
        raise AirflowBadRequest("Database name cannot be None.")

    matches = list(self.get_conn().QueryDatabases({
        "query": "SELECT * FROM r WHERE r.id=@id",
        "parameters": [
            {"name": "@id", "value": database_name}
        ]
    }))
    return len(matches) > 0
[python] Checks if a database exists in CosmosDB.
def create_database(self, database_name):
    """Creates a new database in CosmosDB."""
    if database_name is None:
        raise AirflowBadRequest("Database name cannot be None.")

    # We need to check to see if this database already exists so we
    # don't try to create it twice.
    existing = list(self.get_conn().QueryDatabases({
        "query": "SELECT * FROM r WHERE r.id=@id",
        "parameters": [
            {"name": "@id", "value": database_name}
        ]
    }))

    # Only create if we did not find it already existing.
    if not existing:
        self.get_conn().CreateDatabase({"id": database_name})
[python] Creates a new database in CosmosDB.
def delete_database(self, database_name):
    """Deletes an existing database in CosmosDB."""
    if database_name is None:
        raise AirflowBadRequest("Database name cannot be None.")

    self.get_conn().DeleteDatabase(get_database_link(database_name))
[python] Deletes an existing database in CosmosDB.
def delete_collection(self, collection_name, database_name=None):
    """Deletes an existing collection in the CosmosDB database."""
    if collection_name is None:
        raise AirflowBadRequest("Collection name cannot be None.")

    link = get_collection_link(
        self.__get_database_name(database_name), collection_name)
    self.get_conn().DeleteContainer(link)
[python] Deletes an existing collection in the CosmosDB database.
def upsert_document(self, document, database_name=None, collection_name=None,
                    document_id=None):
    """
    Inserts a new document (or updates an existing one) into an existing
    collection in the CosmosDB database.
    """
    # Assign unique ID if one isn't provided
    if document_id is None:
        document_id = str(uuid.uuid4())

    if document is None:
        raise AirflowBadRequest("You cannot insert a None document")

    # Add document id if one isn't found; note this mutates the caller's
    # dict in place (matches the original behaviour).
    if document.get('id') is None:
        document['id'] = document_id

    created_document = self.get_conn().CreateItem(
        get_collection_link(
            self.__get_database_name(database_name),
            self.__get_collection_name(collection_name)),
        document)

    return created_document
[python] Inserts a new document (or updates an existing one) into an existing collection in the CosmosDB database.
def insert_documents(self, documents, database_name=None, collection_name=None):
    """
    Insert a list of new documents into an existing collection in the
    CosmosDB database.
    """
    if documents is None:
        raise AirflowBadRequest("You cannot insert empty documents")

    # The collection link is the same for every document; compute it once.
    link = get_collection_link(
        self.__get_database_name(database_name),
        self.__get_collection_name(collection_name))
    conn = self.get_conn()
    return [conn.CreateItem(link, doc) for doc in documents]
[python] Insert a list of new documents into an existing collection in the CosmosDB database.
def delete_document(self, document_id, database_name=None, collection_name=None):
    """Delete an existing document out of a collection in the CosmosDB database."""
    if document_id is None:
        raise AirflowBadRequest("Cannot delete a document without an id")

    link = get_document_link(
        self.__get_database_name(database_name),
        self.__get_collection_name(collection_name),
        document_id)
    self.get_conn().DeleteItem(link)
[python] Delete an existing document out of a collection in the CosmosDB database.
def get_document(self, document_id, database_name=None, collection_name=None):
    """Get a document from an existing collection in the CosmosDB database."""
    if document_id is None:
        raise AirflowBadRequest("Cannot get a document without an id")

    link = get_document_link(
        self.__get_database_name(database_name),
        self.__get_collection_name(collection_name),
        document_id)
    try:
        return self.get_conn().ReadItem(link)
    except HTTPFailure:
        # Missing documents are reported as None rather than raising.
        return None
[python] Get a document from an existing collection in the CosmosDB database.
def get_documents(self, sql_string, database_name=None, collection_name=None,
                  partition_key=None):
    """
    Get a list of documents from an existing collection in the CosmosDB
    database via SQL query.
    """
    if sql_string is None:
        raise AirflowBadRequest("SQL query string cannot be None")

    # Query them in SQL
    query = {'query': sql_string}
    link = get_collection_link(
        self.__get_database_name(database_name),
        self.__get_collection_name(collection_name))
    try:
        items = self.get_conn().QueryItems(link, query, partition_key)
        return list(items)
    except HTTPFailure:
        # Failed queries are reported as None rather than raising.
        return None
[python] Get a list of documents from an existing collection in the CosmosDB database via SQL query.
def get_code(dag_id):
    """Return python code of a given dag_id."""
    session = settings.Session()
    DM = models.DagModel
    dag = session.query(DM).filter(DM.dag_id == dag_id).first()
    session.close()

    # Check DAG exists.
    if dag is None:
        raise DagNotFound("Dag id {} not found".format(dag_id))

    try:
        with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:
            return f.read()
    except IOError as e:
        error_message = "Error {} while reading Dag id {} Code".format(
            str(e), dag_id)
        raise AirflowException(error_message)
[python] Return python code of a given dag_id.
def get_function(self, name):
    """
    Returns the Cloud Function with the given name.

    :param name: Name of the function.
    :type name: str
    :return: A Cloud Functions object representing the function.
    :rtype: dict
    """
    request = self.get_conn().projects().locations().functions().get(name=name)
    return request.execute(num_retries=self.num_retries)
[python] Returns the Cloud Function with the given name. :param name: Name of the function. :type name: str :return: A Cloud Functions object representing the function. :rtype: dict
def create_new_function(self, location, body, project_id=None):
    """
    Creates a new function in Cloud Function in the location specified
    in the body.

    :param location: The location of the function.
    :type location: str
    :param body: The body required by the Cloud Functions insert API.
    :type body: dict
    :param project_id: Optional, Google Cloud Project project_id where the
        function belongs. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().projects().locations().functions().create(
        location=self._full_location(project_id, location),
        body=body,
    ).execute(num_retries=self.num_retries)
    # Creation is asynchronous; block until the operation finishes.
    self._wait_for_operation_to_complete(operation_name=response["name"])
[python] Creates a new function in Cloud Function in the location specified in the body. :param location: The location of the function. :type location: str :param body: The body required by the Cloud Functions insert API. :type body: dict :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None
def update_function(self, name, body, update_mask):
    """
    Updates Cloud Functions according to the specified update mask.

    :param name: The name of the function.
    :type name: str
    :param body: The body required by the cloud function patch API.
    :type body: dict
    :param update_mask: The update mask - array of fields that should
        be patched.
    :type update_mask: [str]
    :return: None
    """
    response = self.get_conn().projects().locations().functions().patch(
        updateMask=",".join(update_mask),
        name=name,
        body=body,
    ).execute(num_retries=self.num_retries)
    # Patching is asynchronous; block until the operation finishes.
    self._wait_for_operation_to_complete(operation_name=response["name"])
[python] Updates Cloud Functions according to the specified update mask. :param name: The name of the function. :type name: str :param body: The body required by the cloud function patch API. :type body: dict :param update_mask: The update mask - array of fields that should be patched. :type update_mask: [str] :return: None
def upload_function_zip(self, location, zip_path, project_id=None):
    """
    Uploads zip file with sources.

    :param location: The location where the function is created.
    :type location: str
    :param zip_path: The path of the valid .zip file to upload.
    :type zip_path: str
    :param project_id: Optional, Google Cloud Project project_id where the
        function belongs. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :return: The upload URL that was returned by generateUploadUrl method.
    """
    response = self.get_conn().projects().locations().functions() \
        .generateUploadUrl(
            parent=self._full_location(project_id, location)
        ).execute(num_retries=self.num_retries)
    upload_url = response.get('uploadUrl')

    # Both headers are required according to:
    # https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl # nopep8
    put_headers = {
        'Content-type': 'application/zip',
        'x-goog-content-length-range': '0,104857600',
    }
    with open(zip_path, 'rb') as fp:
        requests.put(url=upload_url, data=fp, headers=put_headers)
    return upload_url
[python] Uploads zip file with sources. :param location: The location where the function is created. :type location: str :param zip_path: The path of the valid .zip file to upload. :type zip_path: str :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: The upload URL that was returned by generateUploadUrl method.
def delete_function(self, name):
    """
    Deletes the specified Cloud Function.

    :param name: The name of the function.
    :type name: str
    :return: None
    """
    response = self.get_conn().projects().locations().functions().delete(
        name=name).execute(num_retries=self.num_retries)
    # Deletion is asynchronous; block until the operation finishes.
    self._wait_for_operation_to_complete(operation_name=response["name"])
[python] Deletes the specified Cloud Function. :param name: The name of the function. :type name: str :return: None
def _wait_for_operation_to_complete(self, operation_name):
    """
    Waits for the named operation to complete - checks status of the
    asynchronous call.

    :param operation_name: The name of the operation.
    :type operation_name: str
    :return: The response returned by the operation.
    :rtype: dict
    :exception: AirflowException in case error is returned.
    """
    service = self.get_conn()
    while True:
        status = service.operations().get(
            name=operation_name,
        ).execute(num_retries=self.num_retries)

        if status.get("done"):
            # Note, according to documentation always either response or
            # error is set when "done" == True.
            error = status.get("error")
            if error:
                raise AirflowException(str(error))
            return status.get("response")

        # Not finished yet — poll again after a fixed delay.
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
[python] Waits for the named operation to complete - checks status of the asynchronous call. :param operation_name: The name of the operation. :type operation_name: str :return: The response returned by the operation. :rtype: dict :exception: AirflowException in case error is returned.
def publish(self, project, topic, messages):
    """
    Publishes messages to a Pub/Sub topic.

    :param project: the GCP project ID in which to publish
    :type project: str
    :param topic: the Pub/Sub topic to which to publish; do not include the
        ``projects/{project}/topics/`` prefix.
    :type topic: str
    :param messages: messages to publish; if the data field in a message is
        set, it should already be base64 encoded.
    :type messages: list of PubSub messages; see
        http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
    """
    full_topic = _format_topic(project, topic)
    request = self.get_conn().projects().topics().publish(
        topic=full_topic, body={'messages': messages})
    try:
        request.execute(num_retries=self.num_retries)
    except HttpError as e:
        raise PubSubException(
            'Error publishing to topic {}'.format(full_topic), e)
[python] Publishes messages to a Pub/Sub topic. :param project: the GCP project ID in which to publish :type project: str :param topic: the Pub/Sub topic to which to publish; do not include the ``projects/{project}/topics/`` prefix. :type topic: str :param messages: messages to publish; if the data field in a message is set, it should already be base64 encoded. :type messages: list of PubSub messages; see http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
def create_topic(self, project, topic, fail_if_exists=False):
    """
    Creates a Pub/Sub topic, if it does not already exist.

    :param project: the GCP project ID in which to create the topic
    :type project: str
    :param topic: the Pub/Sub topic name to create; do not include the
        ``projects/{project}/topics/`` prefix.
    :type topic: str
    :param fail_if_exists: if set, raise an exception if the topic
        already exists
    :type fail_if_exists: bool
    """
    service = self.get_conn()
    full_topic = _format_topic(project, topic)
    try:
        service.projects().topics().create(
            name=full_topic, body={}).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 409 indicates that the topic already exists.
        if str(e.resp['status']) != '409':
            raise PubSubException(
                'Error creating topic {}'.format(full_topic), e)
        message = 'Topic already exists: {}'.format(full_topic)
        self.log.warning(message)
        if fail_if_exists:
            raise PubSubException(message)
[python] Creates a Pub/Sub topic, if it does not already exist. :param project: the GCP project ID in which to create the topic :type project: str :param topic: the Pub/Sub topic name to create; do not include the ``projects/{project}/topics/`` prefix. :type topic: str :param fail_if_exists: if set, raise an exception if the topic already exists :type fail_if_exists: bool
def delete_topic(self, project, topic, fail_if_not_exists=False):
    """
    Deletes a Pub/Sub topic if it exists.

    :param project: the GCP project ID in which to delete the topic
    :type project: str
    :param topic: the Pub/Sub topic name to delete; do not include the
        ``projects/{project}/topics/`` prefix.
    :type topic: str
    :param fail_if_not_exists: if set, raise an exception if the topic
        does not exist
    :type fail_if_not_exists: bool
    """
    service = self.get_conn()
    full_topic = _format_topic(project, topic)
    try:
        service.projects().topics().delete(
            topic=full_topic).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the topic was not found.
        # (The original comment incorrectly said 409.)
        if str(e.resp['status']) != '404':
            raise PubSubException(
                'Error deleting topic {}'.format(full_topic), e)
        message = 'Topic does not exist: {}'.format(full_topic)
        self.log.warning(message)
        if fail_if_not_exists:
            raise PubSubException(message)
[python] Deletes a Pub/Sub topic if it exists. :param project: the GCP project ID in which to delete the topic :type project: str :param topic: the Pub/Sub topic name to delete; do not include the ``projects/{project}/topics/`` prefix. :type topic: str :param fail_if_not_exists: if set, raise an exception if the topic does not exist :type fail_if_not_exists: bool
def create_subscription(self, topic_project, topic, subscription=None,
                        subscription_project=None, ack_deadline_secs=10,
                        fail_if_exists=False):
    """
    Creates a Pub/Sub subscription, if it does not already exist.

    :param topic_project: the GCP project ID of the topic that the
        subscription will be bound to.
    :type topic_project: str
    :param topic: the Pub/Sub topic name that the subscription will be bound
        to create; do not include the ``projects/{project}/subscriptions/``
        prefix.
    :type topic: str
    :param subscription: the Pub/Sub subscription name. If empty, a random
        name will be generated using the uuid module
    :type subscription: str
    :param subscription_project: the GCP project ID where the subscription
        will be created. If unspecified, ``topic_project`` will be used.
    :type subscription_project: str
    :param ack_deadline_secs: Number of seconds that a subscriber has to
        acknowledge each message pulled from the subscription
    :type ack_deadline_secs: int
    :param fail_if_exists: if set, raise an exception if the topic
        already exists
    :type fail_if_exists: bool
    :return: subscription name which will be the system-generated value if
        the ``subscription`` parameter is not supplied
    :rtype: str
    """
    service = self.get_conn()
    full_topic = _format_topic(topic_project, topic)

    # Generate a name and fall back to the topic's project when needed.
    if not subscription:
        subscription = 'sub-{}'.format(uuid4())
    if not subscription_project:
        subscription_project = topic_project
    full_subscription = _format_subscription(subscription_project,
                                             subscription)

    body = {
        'topic': full_topic,
        'ackDeadlineSeconds': ack_deadline_secs
    }
    try:
        service.projects().subscriptions().create(
            name=full_subscription, body=body).execute(
                num_retries=self.num_retries)
    except HttpError as e:
        # Status code 409 indicates that the subscription already exists.
        if str(e.resp['status']) != '409':
            raise PubSubException(
                'Error creating subscription {}'.format(full_subscription), e)
        message = 'Subscription already exists: {}'.format(full_subscription)
        self.log.warning(message)
        if fail_if_exists:
            raise PubSubException(message)

    return subscription
[python] Creates a Pub/Sub subscription, if it does not already exist. :param topic_project: the GCP project ID of the topic that the subscription will be bound to. :type topic_project: str :param topic: the Pub/Sub topic name that the subscription will be bound to create; do not include the ``projects/{project}/subscriptions/`` prefix. :type topic: str :param subscription: the Pub/Sub subscription name. If empty, a random name will be generated using the uuid module :type subscription: str :param subscription_project: the GCP project ID where the subscription will be created. If unspecified, ``topic_project`` will be used. :type subscription_project: str :param ack_deadline_secs: Number of seconds that a subscriber has to acknowledge each message pulled from the subscription :type ack_deadline_secs: int :param fail_if_exists: if set, raise an exception if the topic already exists :type fail_if_exists: bool :return: subscription name which will be the system-generated value if the ``subscription`` parameter is not supplied :rtype: str
def delete_subscription(self, project, subscription, fail_if_not_exists=False):
    """
    Deletes a Pub/Sub subscription, if it exists.

    :param project: the GCP project ID where the subscription exists
    :type project: str
    :param subscription: the Pub/Sub subscription name to delete; do not
        include the ``projects/{project}/subscriptions/`` prefix.
    :type subscription: str
    :param fail_if_not_exists: if set, raise an exception if the topic
        does not exist
    :type fail_if_not_exists: bool
    """
    service = self.get_conn()
    full_subscription = _format_subscription(project, subscription)
    try:
        service.projects().subscriptions().delete(
            subscription=full_subscription).execute(
                num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the subscription was not found.
        if str(e.resp['status']) != '404':
            raise PubSubException(
                'Error deleting subscription {}'.format(full_subscription), e)
        message = 'Subscription does not exist: {}'.format(full_subscription)
        self.log.warning(message)
        if fail_if_not_exists:
            raise PubSubException(message)
[python] Deletes a Pub/Sub subscription, if it exists. :param project: the GCP project ID where the subscription exists :type project: str :param subscription: the Pub/Sub subscription name to delete; do not include the ``projects/{project}/subscriptions/`` prefix. :type subscription: str :param fail_if_not_exists: if set, raise an exception if the topic does not exist :type fail_if_not_exists: bool
def pull(self, project, subscription, max_messages, return_immediately=False):
    """
    Pulls up to ``max_messages`` messages from Pub/Sub subscription.

    :param project: the GCP project ID where the subscription exists
    :type project: str
    :param subscription: the Pub/Sub subscription name to pull from; do not
        include the 'projects/{project}/topics/' prefix.
    :type subscription: str
    :param max_messages: The maximum number of messages to return from
        the Pub/Sub API.
    :type max_messages: int
    :param return_immediately: If set, the Pub/Sub API will immediately
        return if no messages are available. Otherwise, the request will
        block for an undisclosed, but bounded period of time
    :type return_immediately: bool
    :return: A list of Pub/Sub ReceivedMessage objects each containing an
        ``ackId`` property and a ``message`` property, which includes the
        base64-encoded message content. See
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage
    """
    service = self.get_conn()
    full_subscription = _format_subscription(project, subscription)
    body = {
        'maxMessages': max_messages,
        'returnImmediately': return_immediately
    }
    try:
        response = service.projects().subscriptions().pull(
            subscription=full_subscription, body=body).execute(
                num_retries=self.num_retries)
    except HttpError as e:
        raise PubSubException(
            'Error pulling messages from subscription {}'.format(
                full_subscription), e)
    # An absent key means no messages were available.
    return response.get('receivedMessages', [])
[python] Pulls up to ``max_messages`` messages from Pub/Sub subscription. :param project: the GCP project ID where the subscription exists :type project: str :param subscription: the Pub/Sub subscription name to pull from; do not include the 'projects/{project}/topics/' prefix. :type subscription: str :param max_messages: The maximum number of messages to return from the Pub/Sub API. :type max_messages: int :param return_immediately: If set, the Pub/Sub API will immediately return if no messages are available. Otherwise, the request will block for an undisclosed, but bounded period of time :type return_immediately: bool :return: A list of Pub/Sub ReceivedMessage objects each containing an ``ackId`` property and a ``message`` property, which includes the base64-encoded message content. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage
def acknowledge(self, project, subscription, ack_ids):
    """
    Acknowledges the messages associated with the given ``ack_ids`` on a
    Pub/Sub subscription.

    :param project: the GCP project ID where the subscription exists
    :type project: str
    :param subscription: the Pub/Sub subscription name whose messages are
        being acknowledged; do not include the
        'projects/{project}/topics/' prefix.
    :type subscription: str
    :param ack_ids: List of ReceivedMessage ackIds from a previous pull
        response
    :type ack_ids: list
    """
    service = self.get_conn()
    full_subscription = _format_subscription(project, subscription)
    try:
        service.projects().subscriptions().acknowledge(
            subscription=full_subscription,
            body={'ackIds': ack_ids},
        ).execute(num_retries=self.num_retries)
    except HttpError as e:
        raise PubSubException(
            'Error acknowledging {} messages pulled from subscription {}'
            .format(len(ack_ids), full_subscription), e)
[python] Acknowledges the messages associated with the ``ack_ids`` on a Pub/Sub subscription. :param project: the GCP project ID where the subscription exists :type project: str :param subscription: the Pub/Sub subscription name whose messages are being acknowledged; do not include the 'projects/{project}/topics/' prefix. :type subscription: str :param ack_ids: List of ReceivedMessage ackIds from a previous pull response :type ack_ids: list
def get_dep_statuses(self, ti, session, dep_context=None):
    """
    Wrapper around the private _get_dep_statuses method that contains some
    global checks for all dependencies.

    :param ti: the task instance to get the dependency status for
    :type ti: airflow.models.TaskInstance
    :param session: database session
    :type session: sqlalchemy.orm.session.Session
    :param dep_context: the context for which this dependency should
        be evaluated for
    :type dep_context: DepContext
    """
    # Imported here to avoid a circular dependency.
    from airflow.ti_deps.dep_context import DepContext

    if dep_context is None:
        dep_context = DepContext()

    if self.IGNOREABLE and dep_context.ignore_all_deps:
        yield self._passing_status(
            reason="Context specified all dependencies should be ignored.")
        return

    if self.IS_TASK_DEP and dep_context.ignore_task_deps:
        yield self._passing_status(
            reason="Context specified all task dependencies should be ignored.")
        return

    yield from self._get_dep_statuses(ti, session, dep_context)
[python] Wrapper around the private _get_dep_statuses method that contains some global checks for all dependencies. :param ti: the task instance to get the dependency status for :type ti: airflow.models.TaskInstance :param session: database session :type session: sqlalchemy.orm.session.Session :param dep_context: the context for which this dependency should be evaluated for :type dep_context: DepContext
def is_met(self, ti, session, dep_context=None):
    """Whether this dependency is met: every reported status must pass.

    :param ti: the task instance to check this dependency for
    :param session: database session
    :param dep_context: the context this dependency is checked under
    """
    for status in self.get_dep_statuses(ti, session, dep_context):
        if not status.passed:
            return False
    return True
[python] Returns whether or not this dependency is met for a given task instance. A dependency is considered met if all of the dependency statuses it reports are passing. :param ti: the task instance to see if this dependency is met for :type ti: airflow.models.TaskInstance :param session: database session :type session: sqlalchemy.orm.session.Session :param dep_context: The context this dependency is being checked under that stores state that can be used by this dependency. :type dep_context: BaseDepContext
def get_failure_reasons(self, ti, session, dep_context=None):
    """Yield the reason string of every failing dependency status.

    :param ti: the task instance to check this dependency for
    :param session: database session
    :param dep_context: the context this dependency is checked under
    """
    for status in self.get_dep_statuses(ti, session, dep_context):
        if status.passed:
            continue
        yield status.reason
[python] Returns an iterable of strings that explain why this dependency wasn't met. :param ti: the task instance to see if this dependency is met for :type ti: airflow.models.TaskInstance :param session: database session :type session: sqlalchemy.orm.session.Session :param dep_context: The context this dependency is being checked under that stores state that can be used by this dependency. :type dep_context: BaseDepContext
def _parse_s3_config(config_file_name, config_format='boto', profile=None): config = configparser.ConfigParser() if config.read(config_file_name): # pragma: no cover sections = config.sections() else: raise AirflowException("Couldn't read {0}".format(config_file_name)) # Setting option names depending on file format if config_format is None: config_format = 'boto' conf_format = config_format.lower() if conf_format == 'boto': # pragma: no cover if profile is not None and 'profile ' + profile in sections: cred_section = 'profile ' + profile else: cred_section = 'Credentials' elif conf_format == 'aws' and profile is not None: cred_section = profile else: cred_section = 'default' # Option names if conf_format in ('boto', 'aws'): # pragma: no cover key_id_option = 'aws_access_key_id' secret_key_option = 'aws_secret_access_key' # security_token_option = 'aws_security_token' else: key_id_option = 'access_key' secret_key_option = 'secret_key' # Actual Parsing if cred_section not in sections: raise AirflowException("This config file format is not recognized") else: try: access_key = config.get(cred_section, key_id_option) secret_key = config.get(cred_section, secret_key_option) except Exception: logging.warning("Option Error in parsing s3 config file") raise return access_key, secret_key
[python] Parses a config file for s3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats :param config_file_name: path to the config file :type config_file_name: str :param config_format: config type. One of "boto", "s3cmd" or "aws". Defaults to "boto" :type config_format: str :param profile: profile name in AWS type config file :type profile: str
def get_credentials(self, region_name=None):
    """Get the underlying `botocore.Credentials` object.

    This contains the following authentication attributes: access_key,
    secret_key and token. The frozen form is used because refreshable
    credentials could change between reading access key and secret key
    separately; see https://stackoverflow.com/a/36291428/8283373
    """
    session, _ = self._get_credentials(region_name)
    credentials = session.get_credentials()
    return credentials.get_frozen_credentials()
[python] Get the underlying `botocore.Credentials` object. This contains the following authentication attributes: access_key, secret_key and token.
def expand_role(self, role):
    """Return the Amazon Resource Name (ARN) for *role*.

    A bare role name is resolved through IAM; anything containing a '/'
    is assumed to already be an ARN and is returned unchanged.

    :param role: IAM role name or ARN
    :return: IAM role ARN
    """
    if '/' not in role:
        role = self.get_client_type('iam').get_role(
            RoleName=role)['Role']['Arn']
    return role
[python] If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role. If IAM role is already an IAM role ARN, no change is made. :param role: IAM role name or ARN :return: IAM role ARN
def get_conn(self):
    """Returns a vertica connection object built from the stored
    Airflow connection (defaults: host 'localhost', port 5433)."""
    conn = self.get_connection(self.vertica_conn_id)
    conn_config = {
        "user": conn.login,
        "password": conn.password or '',
        "database": conn.schema,
        "host": conn.host or 'localhost',
        "port": int(conn.port) if conn.port else 5433,
    }
    return connect(**conn_config)
[python] Returns a vertica connection object
def set_context(logger, value):
    """Walk up the logger tree, calling ``set_context(value)`` on every
    handler that implements it.

    :param logger: logger to start from
    :param value: value to set as the handlers' context
    """
    current = logger
    while current is not None:
        for handler in current.handlers:
            try:
                handler.set_context(value)
            except AttributeError:
                # Not all handlers need to have context passed in so we
                # ignore the error when handlers do not define set_context.
                pass
        # Stop climbing once propagation is disabled.
        current = current.parent if current.propagate is True else None
[python] Walks the tree of loggers and tries to set the context for each handler :param logger: logger :param value: value to set
def write(self, message):
    """Buffer *message*; when it ends with a newline, emit the whole
    buffer (right-stripped) through the wrapped logger and reset.

    :param message: message to log
    """
    self._buffer += message
    if message.endswith("\n"):
        self.logger.log(self.level, self._buffer.rstrip())
        self._buffer = str()
[python] Do whatever it takes to actually log the specified logging record :param message: message to log
def flush(self):
    """Emit any buffered text through the wrapped logger and clear the
    buffer, ensuring all output has been flushed."""
    if self._buffer:
        self.logger.log(self.level, self._buffer)
        self._buffer = str()
[python] Ensure all logging output has been flushed
def correct_maybe_zipped(fileloc):
    """If *fileloc* points inside a folder with a ``.zip`` suffix that is
    an actual zip archive, return the archive path; otherwise return
    *fileloc* unchanged."""
    match = re.search(
        r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc)
    _, archive, _ = match.groups()
    if not (archive and zipfile.is_zipfile(archive)):
        return fileloc
    return archive
[python] If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive and path to zip is returned.
def list_py_file_paths(directory, safe_mode=True, include_examples=None):
    """
    Traverse a directory and look for Python files.

    :param directory: the directory to traverse
    :type directory: unicode
    :param safe_mode: whether to use a heuristic to determine whether a file
        contains Airflow DAG definitions (file must mention both b'DAG' and
        b'airflow')
    :param include_examples: whether to append Airflow's bundled example
        DAGs; defaults to the [core] LOAD_EXAMPLES config setting
    :return: a list of paths to Python files in the specified directory
    :rtype: list[unicode]
    """
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        return [directory]
    elif os.path.isdir(directory):
        # Maps each directory to the .airflowignore patterns that apply to it.
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as f:
                    # If we have new patterns create a copy so we don't change
                    # the previous list (which would affect other subdirs)
                    patterns += [re.compile(p) for p in f.read().split('\n') if p]
            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]
            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = patterns
            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    # Only plain .py files and zip archives are candidates.
                    mod_name, file_ext = os.path.splitext(
                        os.path.split(file_path)[-1])
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    if any([re.findall(p, file_path) for p in patterns]):
                        continue
                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                [s in content for s in (b'DAG', b'airflow')])
                    if not might_contain_dag:
                        continue
                    file_paths.append(file_path)
                except Exception:
                    # A broken file must not abort scanning the rest.
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        file_paths.extend(list_py_file_paths(example_dag_folder,
                                             safe_mode,
                                             False))
    return file_paths
[python] Traverse a directory and look for Python files. :param directory: the directory to traverse :type directory: unicode :param safe_mode: whether to use a heuristic to determine whether a file contains Airflow DAG definitions :return: a list of paths to Python files in the specified directory :rtype: list[unicode]
def construct_task_instance(self, session=None, lock_for_update=False):
    """Load the TaskInstance matching this record's primary key from the
    database.

    :param session: DB session.
    :param lock_for_update: if True, lock the TaskInstance row
        (FOR UPDATE clause) until the session is committed.
    """
    TI = airflow.models.TaskInstance
    query = session.query(TI).filter(
        TI.dag_id == self._dag_id,
        TI.task_id == self._task_id,
        TI.execution_date == self._execution_date)
    if lock_for_update:
        query = query.with_for_update()
    return query.first()
[python] Construct a TaskInstance from the database based on the primary key :param session: DB session. :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed.
def get_dag(self, dag_id):
    """Return the SimpleDag registered under *dag_id*.

    :param dag_id: DAG ID
    :raises AirflowException: if the DAG ID is not in the bag
    """
    try:
        return self.dag_id_to_simple_dag[dag_id]
    except KeyError:
        raise AirflowException("Unknown DAG ID {}".format(dag_id))
[python] :param dag_id: DAG ID :type dag_id: unicode :return: if the given DAG ID exists in the bag, return the BaseDag corresponding to that ID. Otherwise, throw an Exception :rtype: airflow.utils.dag_processing.SimpleDag
def start(self):
    """Launch the DagFileProcessorManager process and start the DAG
    parsing loop in the manager."""
    self._process = self._launch_process(
        self._dag_directory,
        self._file_paths,
        self._max_runs,
        self._processor_factory,
        self._child_signal_conn,
        self._stat_queue,
        self._result_queue,
        self._async_mode,
    )
    self.log.info("Launched DagFileProcessorManager with pid: %s",
                  self._process.pid)
[python] Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
def harvest_simple_dags(self):
    """Harvest DAG parsing results from the result queue and sync
    metadata from the stat queue.

    :return: list of parsing results in SimpleDag format
    """
    # Metadata and results to be harvested can be inconsistent,
    # but it should not be a big problem.
    self._sync_metadata()
    # Heartbeating after syncing metadata so we do not restart the manager
    # if it processed all files max_run times and exited normally.
    self._heartbeat_manager()
    # multiprocessing.Queue().qsize does not work on MacOS, so the count
    # tracked via stats is used there instead.
    if sys.platform == "darwin":
        result_count = self._result_count
    else:
        result_count = self._result_queue.qsize()
    simple_dags = [self._result_queue.get() for _ in range(result_count)]
    self._result_count = 0
    return simple_dags
[python] Harvest DAG parsing results from result queue and sync metadata from stat queue. :return: List of parsing result in SimpleDag format.
def _heartbeat_manager(self): if self._process and not self._process.is_alive() and not self.done: self.start()
[python] Heartbeat DAG file processor and start it if it is not alive. :return:
def _sync_metadata(self):
    """
    Sync metadata from the stat queue and only keep the latest stat.

    Snapshot fields (file paths, pids, done flags) are overwritten on
    every drained stat so only the newest wins, while ``result_count``
    accumulates across all drained stats.
    :return: None
    """
    while not self._stat_queue.empty():
        stat = self._stat_queue.get()
        self._file_paths = stat.file_paths
        self._all_pids = stat.all_pids
        self._done = stat.done
        self._all_files_processed = stat.all_files_processed
        # Additive, unlike the snapshot fields above.
        self._result_count += stat.result_count
[python] Sync metadata from stat queue and only keep the latest stat. :return:
def terminate(self):
    """
    Send a termination message to the DAG parsing processor manager over
    the child signal connection; the manager is expected to terminate all
    of its DAG file processors in response.
    """
    self.log.info("Sending termination message to manager.")
    self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
[python] Send termination signal to DAG parsing processor manager and expect it to terminate all DAG file processors.
def end(self):
    """
    Terminate (and then kill) the manager process launched.

    Sends SIGTERM first and waits briefly; falls back to SIGKILL if the
    manager is still running. Only signals processes that are direct
    children of this process, to avoid killing an unrelated process if
    the PID was reused.
    :return: None
    """
    if not self._process:
        # Fixed: Logger.warn is deprecated; use warning().
        self.log.warning('Ending without manager process.')
        return
    this_process = psutil.Process(os.getpid())
    try:
        manager_process = psutil.Process(self._process.pid)
    except psutil.NoSuchProcess:
        self.log.info("Manager process not running.")
        return

    # First try SIGTERM
    if manager_process.is_running() \
            and manager_process.pid in [x.pid for x in this_process.children()]:
        self.log.info("Terminating manager process: %s", manager_process.pid)
        manager_process.terminate()
        # TODO: Remove magic number
        timeout = 5
        self.log.info("Waiting up to %ss for manager process to exit...",
                      timeout)
        try:
            psutil.wait_procs({manager_process}, timeout)
        except psutil.TimeoutExpired:
            self.log.debug("Ran out of time while waiting for "
                           "processes to exit")

    # Then SIGKILL
    if manager_process.is_running() \
            and manager_process.pid in [x.pid for x in this_process.children()]:
        self.log.info("Killing manager process: %s", manager_process.pid)
        manager_process.kill()
        manager_process.wait()
[python] Terminate (and then kill) the manager process launched. :return:
def _exit_gracefully(self, signum, frame):
    """
    Helper method to clean up DAG file processors to avoid leaving orphan
    processes. Intended to be installed as a signal handler: terminates
    the manager, waits for it to end, then exits this process.

    :param signum: signal number received
    :param frame: current stack frame (unused)
    """
    self.log.info("Exiting gracefully upon receiving signal %s", signum)
    self.terminate()
    self.end()
    self.log.debug("Finished terminating DAG processors.")
    sys.exit(os.EX_OK)
[python] Helper method to clean up DAG file processors to avoid leaving orphan processes.
def start(self):
    """
    Use multiple processes to parse and generate tasks for the DAGs in
    parallel. By processing them in separate processes, we can get
    parallelism and isolation from potentially harmful user code.

    Dispatches to the async (standalone loop) or sync (signal-driven)
    parsing loop depending on ``_async_mode``.
    """
    self.log.info("Processing files using up to %s processes at a time ",
                  self._parallelism)
    self.log.info("Process each file at most once every %s seconds",
                  self._file_process_interval)
    self.log.info(
        "Checking for new files in %s every %s seconds",
        self._dag_directory, self.dag_dir_list_interval
    )
    if self._async_mode:
        self.log.debug("Starting DagFileProcessorManager in async mode")
        self.start_in_async()
    else:
        self.log.debug("Starting DagFileProcessorManager in sync mode")
        self.start_in_sync()
[python] Use multiple processes to parse and generate tasks for the DAGs in parallel. By processing them in separate processes, we can get parallelism and isolation from potentially harmful user code.
def start_in_async(self):
    """
    Parse DAG files repeatedly in a standalone loop.

    Each iteration: handle any pending agent signal, refresh the dag
    directory, heartbeat the processors, publish results and stats, and
    sleep so the loop runs at most about once per second.
    """
    while True:
        loop_start_time = time.time()
        if self._signal_conn.poll():
            agent_signal = self._signal_conn.recv()
            if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
                self.terminate()
                break
            elif agent_signal == DagParsingSignal.END_MANAGER:
                self.end()
                sys.exit(os.EX_OK)
        self._refresh_dag_dir()
        simple_dags = self.heartbeat()
        for simple_dag in simple_dags:
            self._result_queue.put(simple_dag)
        self._print_stat()
        all_files_processed = all(self.get_last_finish_time(x) is not None
                                  for x in self.file_paths)
        max_runs_reached = self.max_runs_reached()
        dag_parsing_stat = DagParsingStat(self._file_paths,
                                          self.get_all_pids(),
                                          max_runs_reached,
                                          all_files_processed,
                                          len(simple_dags))
        self._stat_queue.put(dag_parsing_stat)
        if max_runs_reached:
            self.log.info("Exiting dag parsing loop as all files "
                          "have been processed %s times", self._max_runs)
            break
        # Throttle to roughly one iteration per second.
        loop_duration = time.time() - loop_start_time
        if loop_duration < 1:
            sleep_length = 1 - loop_duration
            self.log.debug("Sleeping for %.2f seconds to prevent excessive logging",
                           sleep_length)
            time.sleep(sleep_length)
[python] Parse DAG files repeatedly in a standalone loop.
def start_in_sync(self):
    """
    Parse DAG files in a loop controlled by DagParsingSignal. The actual
    DAG parsing loop runs once upon receiving one agent heartbeat message
    and reports MANAGER_DONE when that iteration finishes.
    """
    while True:
        agent_signal = self._signal_conn.recv()
        if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
            self.terminate()
            break
        elif agent_signal == DagParsingSignal.END_MANAGER:
            self.end()
            sys.exit(os.EX_OK)
        elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
            self._refresh_dag_dir()
            simple_dags = self.heartbeat()
            for simple_dag in simple_dags:
                self._result_queue.put(simple_dag)
            self._print_stat()
            all_files_processed = all(self.get_last_finish_time(x) is not None
                                      for x in self.file_paths)
            max_runs_reached = self.max_runs_reached()
            dag_parsing_stat = DagParsingStat(self._file_paths,
                                              self.get_all_pids(),
                                              self.max_runs_reached(),
                                              all_files_processed,
                                              len(simple_dags))
            self._stat_queue.put(dag_parsing_stat)
            self.wait_until_finished()
            self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
            if max_runs_reached:
                self.log.info("Exiting dag parsing loop as all files "
                              "have been processed %s times", self._max_runs)
                # NOTE(review): MANAGER_DONE was already sent above, so the
                # agent receives it twice on the final iteration -- confirm
                # whether the agent tolerates/expects the duplicate.
                self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
                break
[python] Parse DAG files in a loop controlled by DagParsingSignal. Actual DAG parsing loop will run once upon receiving one agent heartbeat message and will report done when finished the loop.
def _refresh_dag_dir(self):
    """Refresh file paths from the dag directory if the configured list
    interval has elapsed since the last refresh."""
    elapsed = (timezone.utcnow() -
               self.last_dag_dir_refresh_time).total_seconds()
    if elapsed <= self.dag_dir_list_interval:
        return
    # Build up a list of Python files that could contain DAGs
    self.log.info("Searching for files in %s", self._dag_directory)
    self._file_paths = list_py_file_paths(self._dag_directory)
    self.last_dag_dir_refresh_time = timezone.utcnow()
    self.log.info("There are %s files in %s",
                  len(self._file_paths), self._dag_directory)
    self.set_file_paths(self._file_paths)
    try:
        self.log.debug("Removing old import errors")
        self.clear_nonexistent_import_errors()
    except Exception:
        self.log.exception("Error removing old import errors")
[python] Refresh file paths from dag dir if we haven't done it for too long.
def _print_stat(self):
    """Occasionally log stats about how fast the files are getting
    processed (no more often than ``print_stats_interval`` seconds)."""
    elapsed = (timezone.utcnow() -
               self.last_stat_print_time).total_seconds()
    if elapsed <= self.print_stats_interval:
        return
    if self._file_paths:
        self._log_file_processing_stats(self._file_paths)
    self.last_stat_print_time = timezone.utcnow()
[python] Occasionally print out stats about how fast the files are getting processed
def clear_nonexistent_import_errors(self, session):
    """
    Clears import errors for files that no longer exist.

    :param session: session for ORM operations
    :type session: sqlalchemy.orm.session.Session
    """
    query = session.query(errors.ImportError)
    if self._file_paths:
        # Keep errors only for files we are still tracking.
        query = query.filter(
            ~errors.ImportError.filename.in_(self._file_paths)
        )
    query.delete(synchronize_session='fetch')
    session.commit()
[python] Clears import errors for files that no longer exist. :param session: session for ORM operations :type session: sqlalchemy.orm.session.Session
def _log_file_processing_stats(self, known_file_paths):
    """
    Print out stats about how files are getting processed.

    :param known_file_paths: a list of file paths that may contain Airflow
        DAG definitions
    :type known_file_paths: list[unicode]
    :return: None
    """
    # File Path: Path to the file containing the DAG definition
    # PID: PID associated with the process that's processing the file. May
    # be empty.
    # Runtime: If the process is currently running, how long it's been
    # running for in seconds.
    # Last Runtime: If the process ran before, how long did it take to
    # finish in seconds
    # Last Run: When the file finished processing in the previous run.
    headers = ["File Path",
               "PID",
               "Runtime",
               "Last Runtime",
               "Last Run"]
    rows = []
    for file_path in known_file_paths:
        last_runtime = self.get_last_runtime(file_path)
        # Dotted file name (path separators replaced) used as the stat key.
        file_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
        if last_runtime:
            Stats.gauge(
                'dag_processing.last_runtime.{}'.format(file_name),
                last_runtime
            )
        processor_pid = self.get_pid(file_path)
        processor_start_time = self.get_start_time(file_path)
        runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
                   if processor_start_time else None)
        last_run = self.get_last_finish_time(file_path)
        if last_run:
            seconds_ago = (timezone.utcnow() - last_run).total_seconds()
            Stats.gauge(
                'dag_processing.last_run.seconds_ago.{}'.format(file_name),
                seconds_ago
            )
        rows.append((file_path,
                     processor_pid,
                     runtime,
                     last_runtime,
                     last_run))
    # Sort by longest last runtime. (Can't sort None values in python3)
    rows = sorted(rows, key=lambda x: x[3] or 0.0)
    formatted_rows = []
    for file_path, pid, runtime, last_runtime, last_run in rows:
        formatted_rows.append((file_path,
                               pid,
                               "{:.2f}s".format(runtime)
                               if runtime else None,
                               "{:.2f}s".format(last_runtime)
                               if last_runtime else None,
                               last_run.strftime("%Y-%m-%dT%H:%M:%S")
                               if last_run else None))
    log_str = ("\n" +
               "=" * 80 +
               "\n" +
               "DAG File Processing Stats\n\n" +
               tabulate(formatted_rows, headers=headers) +
               "\n" +
               "=" * 80)
    self.log.info(log_str)
[python] Print out stats about how files are getting processed. :param known_file_paths: a list of file paths that may contain Airflow DAG definitions :type known_file_paths: list[unicode] :return: None
def get_pid(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the PID of the process processing the given file or None if
        the specified file is not being processed
    :rtype: int
    """
    processor = self._processors.get(file_path)
    return processor.pid if processor is not None else None
[python] :param file_path: the path to the file that's being processed :type file_path: unicode :return: the PID of the process processing the given file or None if the specified file is not being processed :rtype: int
def get_runtime(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the current runtime (in seconds) of the process that's
        processing the specified file or None if the file is not currently
        being processed
    """
    processor = self._processors.get(file_path)
    if processor is None:
        return None
    return (timezone.utcnow() - processor.start_time).total_seconds()
[python] :param file_path: the path to the file that's being processed :type file_path: unicode :return: the current runtime (in seconds) of the process that's processing the specified file or None if the file is not currently being processed
def get_start_time(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the start time of the process that's processing the
        specified file or None if the file is not currently being processed
    :rtype: datetime
    """
    processor = self._processors.get(file_path)
    return processor.start_time if processor is not None else None
[python] :param file_path: the path to the file that's being processed :type file_path: unicode :return: the start time of the process that's processing the specified file or None if the file is not currently being processed :rtype: datetime
def set_file_paths(self, new_file_paths):
    """
    Update this with a new set of paths to DAG definition files.
    Queue entries and processors for files no longer present are dropped;
    processors for removed files are terminated.

    :param new_file_paths: list of paths to DAG definition files
    :type new_file_paths: list[unicode]
    :return: None
    """
    self._file_paths = new_file_paths
    self._file_path_queue = [p for p in self._file_path_queue
                             if p in new_file_paths]
    # Stop processors that are working on deleted files
    retained = {}
    for file_path, processor in self._processors.items():
        if file_path not in new_file_paths:
            self.log.warning("Stopping processor for %s", file_path)
            processor.terminate()
        else:
            retained[file_path] = processor
    self._processors = retained
[python] Update this with a new set of paths to DAG definition files. :param new_file_paths: list of paths to DAG definition files :type new_file_paths: list[unicode] :return: None
def wait_until_finished(self):
    """Sleep in 0.1s increments until every running processor reports
    done."""
    for processor in self._processors.values():
        while not processor.done:
            time.sleep(0.1)
[python] Sleeps until all the processors are done.
def heartbeat(self):
    """
    This should be periodically called by the manager loop. This method
    will kick off new processes to process DAG definition files and read
    the results from the finished processors.

    :return: a list of SimpleDags that were produced by processors that
        have finished since the last time this was called
    :rtype: list[airflow.utils.dag_processing.SimpleDag]
    """
    finished_processors = {}
    """:type : dict[unicode, AbstractDagFileProcessor]"""
    running_processors = {}
    """:type : dict[unicode, AbstractDagFileProcessor]"""
    # Partition processors into finished and still-running, recording
    # runtime bookkeeping for the finished ones.
    for file_path, processor in self._processors.items():
        if processor.done:
            self.log.debug("Processor for %s finished", file_path)
            now = timezone.utcnow()
            finished_processors[file_path] = processor
            self._last_runtime[file_path] = (now -
                                             processor.start_time).total_seconds()
            self._last_finish_time[file_path] = now
            self._run_count[file_path] += 1
        else:
            running_processors[file_path] = processor
    self._processors = running_processors
    self.log.debug("%s/%s DAG parsing processes running",
                   len(self._processors), self._parallelism)
    self.log.debug("%s file paths queued for processing",
                   len(self._file_path_queue))
    # Collect all the DAGs that were found in the processed files
    simple_dags = []
    for file_path, processor in finished_processors.items():
        if processor.result is None:
            # A None result indicates the processor subprocess died.
            self.log.warning(
                "Processor for %s exited with return code %s.",
                processor.file_path, processor.exit_code
            )
        else:
            for simple_dag in processor.result:
                simple_dags.append(simple_dag)
    # Generate more file paths to process if we processed all the files
    # already.
    if len(self._file_path_queue) == 0:
        # If the file path is already being processed, or if a file was
        # processed recently, wait until the next batch
        file_paths_in_progress = self._processors.keys()
        now = timezone.utcnow()
        file_paths_recently_processed = []
        for file_path in self._file_paths:
            last_finish_time = self.get_last_finish_time(file_path)
            if (last_finish_time is not None and
                (now - last_finish_time).total_seconds() <
                    self._file_process_interval):
                file_paths_recently_processed.append(file_path)
        # Files that already hit the per-file run limit are excluded.
        files_paths_at_run_limit = [file_path
                                    for file_path, num_runs in self._run_count.items()
                                    if num_runs == self._max_runs]
        files_paths_to_queue = list(set(self._file_paths) -
                                    set(file_paths_in_progress) -
                                    set(file_paths_recently_processed) -
                                    set(files_paths_at_run_limit))
        for file_path, processor in self._processors.items():
            self.log.debug(
                "File path %s is still being processed (started: %s)",
                processor.file_path, processor.start_time.isoformat()
            )
        self.log.debug(
            "Queuing the following files for processing:\n\t%s",
            "\n\t".join(files_paths_to_queue)
        )
        self._file_path_queue.extend(files_paths_to_queue)
    zombies = self._find_zombies()
    # Start more processors if we have enough slots and files to process
    while (self._parallelism - len(self._processors) > 0 and
           len(self._file_path_queue) > 0):
        file_path = self._file_path_queue.pop(0)
        processor = self._processor_factory(file_path, zombies)
        processor.start()
        self.log.debug(
            "Started a process (PID: %s) to generate tasks for %s",
            processor.pid, file_path
        )
        self._processors[file_path] = processor
    # Update heartbeat count.
    self._run_count[self._heart_beat_key] += 1
    return simple_dags
[python] This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors. :return: a list of SimpleDags that were produced by processors that have finished since the last time this was called :rtype: list[airflow.utils.dag_processing.SimpleDag]
def _find_zombies(self, session):
    """
    Find zombie task instances: tasks in RUNNING state whose LocalTaskJob
    has stopped heartbeating (or is no longer running).

    The expensive DB query only runs every ``_zombie_query_interval``
    seconds; in between, an empty list is returned.

    :param session: database session
    :return: zombie task instances in SimpleTaskInstance format
    """
    now = timezone.utcnow()
    zombies = []
    if (now - self._last_zombie_query_time).total_seconds() \
            > self._zombie_query_interval:
        # to avoid circular imports
        from airflow.jobs import LocalTaskJob as LJ
        self.log.info("Finding 'running' jobs without a recent heartbeat")
        TI = airflow.models.TaskInstance
        limit_dttm = timezone.utcnow() - timedelta(
            seconds=self._zombie_threshold_secs)
        self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
        tis = (
            session.query(TI)
            .join(LJ, TI.job_id == LJ.id)
            .filter(TI.state == State.RUNNING)
            .filter(
                or_(
                    LJ.state != State.RUNNING,
                    LJ.latest_heartbeat < limit_dttm,
                )
            ).all()
        )
        self._last_zombie_query_time = timezone.utcnow()
        for ti in tis:
            zombies.append(SimpleTaskInstance(ti))
    return zombies
[python] Find zombie task instances, i.e. tasks that haven't heartbeated for too long. :return: Zombie task instances in SimpleTaskInstance format.
def max_runs_reached(self):
    """
    :return: whether every file path (and the heartbeat counter) has been
        processed at least ``_max_runs`` times; always False when runs are
        unlimited (``_max_runs == -1``)
    """
    if self._max_runs == -1:  # Unlimited runs.
        return False
    if any(self._run_count[file_path] < self._max_runs
           for file_path in self._file_paths):
        return False
    return self._run_count[self._heart_beat_key] >= self._max_runs
[python] :return: whether all file paths have been processed max_runs times
def end(self):
    """
    Kill all child processes on exit since we don't want to leave them as
    orphaned. Tries SIGTERM with a short wait first, then SIGKILL for any
    survivors. Only direct/recursive children of this process are
    signalled, to avoid killing an unrelated process after PID reuse.
    """
    pids_to_kill = self.get_all_pids()
    if len(pids_to_kill) > 0:
        # First try SIGTERM
        this_process = psutil.Process(os.getpid())
        # Only check child processes to ensure that we don't have a case
        # where we kill the wrong process because a child process died
        # but the PID got reused.
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        for child in child_processes:
            self.log.info("Terminating child PID: %s", child.pid)
            child.terminate()
        # TODO: Remove magic number
        timeout = 5
        self.log.info("Waiting up to %s seconds for processes to exit...",
                      timeout)
        try:
            psutil.wait_procs(
                child_processes, timeout=timeout,
                callback=lambda x: self.log.info('Terminated PID %s', x.pid))
        except psutil.TimeoutExpired:
            self.log.debug("Ran out of time while waiting for processes to exit")
        # Then SIGKILL
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        if len(child_processes) > 0:
            self.log.info("SIGKILL processes that did not terminate gracefully")
            for child in child_processes:
                self.log.info("Killing child PID: %s", child.pid)
                child.kill()
                child.wait()
[python] Kill all child processes on exit since we don't want to leave them as orphaned.
def get_conn(self):
    """
    Opens a ssh connection to the remote host.

    :rtype: paramiko.client.SSHClient
    """
    self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
    client = paramiko.SSHClient()
    if not self.allow_host_key_change:
        self.log.warning('Remote Identification Change is not verified. '
                         'This wont protect against Man-In-The-Middle attacks')
        client.load_system_host_keys()
    if self.no_host_key_check:
        self.log.warning('No Host Key Verification. This wont protect '
                         'against Man-In-The-Middle attacks')
        # Default is RejectPolicy
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Password auth when a non-empty password is configured; otherwise
    # rely on key-based auth only.
    if self.password and self.password.strip():
        client.connect(hostname=self.remote_host,
                       username=self.username,
                       password=self.password,
                       key_filename=self.key_file,
                       timeout=self.timeout,
                       compress=self.compress,
                       port=self.port,
                       sock=self.host_proxy)
    else:
        client.connect(hostname=self.remote_host,
                       username=self.username,
                       key_filename=self.key_file,
                       timeout=self.timeout,
                       compress=self.compress,
                       port=self.port,
                       sock=self.host_proxy)
    if self.keepalive_interval:
        client.get_transport().set_keepalive(self.keepalive_interval)
    self.client = client
    return client
[python] Opens a ssh connection to the remote host. :rtype: paramiko.client.SSHClient
def get_tunnel(self, remote_port, remote_host="localhost", local_port=None):
    """
    Creates a tunnel between two hosts. Like
    ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>.

    :param remote_port: The remote port to create a tunnel to
    :type remote_port: int
    :param remote_host: The remote host to create a tunnel to
        (default localhost)
    :type remote_host: str
    :param local_port: The local port to attach the tunnel to; when None
        an ephemeral local port is chosen
    :type local_port: int
    :return: sshtunnel.SSHTunnelForwarder object
    """
    if local_port:
        local_bind_address = ('localhost', local_port)
    else:
        local_bind_address = ('localhost',)
    if self.password and self.password.strip():
        client = SSHTunnelForwarder(self.remote_host,
                                    ssh_port=self.port,
                                    ssh_username=self.username,
                                    ssh_password=self.password,
                                    ssh_pkey=self.key_file,
                                    ssh_proxy=self.host_proxy,
                                    local_bind_address=local_bind_address,
                                    remote_bind_address=(remote_host,
                                                         remote_port),
                                    logger=self.log)
    else:
        # NOTE(review): only this branch passes host_pkey_directories=[]
        # (disables host key directory scanning) -- confirm the asymmetry
        # with the password branch is intentional.
        client = SSHTunnelForwarder(self.remote_host,
                                    ssh_port=self.port,
                                    ssh_username=self.username,
                                    ssh_pkey=self.key_file,
                                    ssh_proxy=self.host_proxy,
                                    local_bind_address=local_bind_address,
                                    remote_bind_address=(remote_host,
                                                         remote_port),
                                    host_pkey_directories=[],
                                    logger=self.log)
    return client
[python] Creates a tunnel between two hosts. Like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>. :param remote_port: The remote port to create a tunnel to :type remote_port: int :param remote_host: The remote host to create a tunnel to (default localhost) :type remote_host: str :param local_port: The local port to attach the tunnel to :type local_port: int :return: sshtunnel.SSHTunnelForwarder object
def create_transfer_job(self, body):
    """Creates a transfer job that runs periodically.

    :param body: (Required) request body, as described in
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
    :return: the created TransferJob dict
    """
    body = self._inject_project_id(body, BODY, PROJECT_ID)
    request = self.get_conn().transferJobs().create(body=body)
    return request.execute(num_retries=self.num_retries)
[python] Creates a transfer job that runs periodically. :param body: (Required) A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: transfer job. See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob :rtype: dict
def get_transfer_job(self, job_name, project_id=None):
    """Gets the latest state of a transfer job.

    :param job_name: (Required) name of the job to fetch
    :param project_id: (Optional) ID of the project that owns the job;
        defaults to the project configured on the GCP connection
    :return: TransferJob dict
    """
    conn = self.get_conn()
    request = conn.transferJobs().get(jobName=job_name, projectId=project_id)
    return request.execute(num_retries=self.num_retries)
[python] Gets the latest state of a long-running operation in Google Storage Transfer Service. :param job_name: (Required) Name of the job to be fetched :type job_name: str :param project_id: (Optional) the ID of the project that owns the Transfer Job. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Transfer Job :rtype: dict
def list_transfer_job(self, filter):
    """Lists transfer jobs matching the specified filter, following
    pagination until exhausted.

    :param filter: (Required) request filter, as described in
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
    :return: list of TransferJob dicts
    """
    conn = self.get_conn()
    filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
    jobs = []
    request = conn.transferJobs().list(filter=json.dumps(filter))
    while request is not None:
        page = request.execute(num_retries=self.num_retries)
        jobs.extend(page[TRANSFER_JOBS])
        request = conn.transferJobs().list_next(previous_request=request,
                                                previous_response=page)
    return jobs
[python] Lists long-running operations in Google Storage Transfer Service that match the specified filter. :param filter: (Required) A request filter, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter :type filter: dict :return: List of Transfer Jobs :rtype: list[dict]
def update_transfer_job(self, job_name, body):
    """Updates a transfer job that runs periodically.

    :param job_name: (Required) name of the job to update
    :param body: request body, as described in
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
    :return: the updated TransferJob dict
    """
    body = self._inject_project_id(body, BODY, PROJECT_ID)
    request = self.get_conn().transferJobs().patch(jobName=job_name,
                                                   body=body)
    return request.execute(num_retries=self.num_retries)
[python] Updates a transfer job that runs periodically. :param job_name: (Required) Name of the job to be updated :type job_name: str :param body: A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: If successful, TransferJob. :rtype: dict
def delete_transfer_job(self, job_name, project_id):
    """Soft-deletes a transfer job by patching its status to DELETED.

    After deletion the job and its transfer executions become eligible for
    garbage collection after 30 days.

    :param job_name: (Required) name of the job to delete
    :param project_id: ID of the project that owns the job
    :rtype: None
    """
    delete_body = {
        PROJECT_ID: project_id,
        TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED},
        TRANSFER_JOB_FIELD_MASK: STATUS1,
    }
    request = self.get_conn().transferJobs().patch(jobName=job_name,
                                                   body=delete_body)
    return request.execute(num_retries=self.num_retries)
[python] Deletes a transfer job. This is a soft delete. After a transfer job is deleted, the job and all the transfer executions are subject to garbage collection. Transfer jobs become eligible for garbage collection 30 days after soft delete. :param job_name: (Required) Name of the job to be deleted :type job_name: str :param project_id: (Optional) the ID of the project that owns the Transfer Job. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :rtype: None
def cancel_transfer_operation(self, operation_name):
    """
    Cancels a transfer operation in Google Storage Transfer Service.

    :param operation_name: Name of the transfer operation.
    :type operation_name: str
    :rtype: None
    """
    conn = self.get_conn()
    request = conn.transferOperations().cancel(name=operation_name)
    request.execute(num_retries=self.num_retries)
[python] Cancels a transfer operation in Google Storage Transfer Service. :param operation_name: Name of the transfer operation. :type operation_name: str :rtype: None
def get_transfer_operation(self, operation_name):
    """
    Gets a transfer operation in Google Storage Transfer Service.

    :param operation_name: (Required) Name of the transfer operation.
    :type operation_name: str
    :return: transfer operation
        See:
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
    :rtype: dict
    """
    conn = self.get_conn()
    request = conn.transferOperations().get(name=operation_name)
    return request.execute(num_retries=self.num_retries)
[python] Gets a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :return: transfer operation See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation :rtype: dict
def list_transfer_operations(self, filter):
    """
    Lists transfer operations in Google Storage Transfer Service that match
    the specified filter.

    :param filter: (Required) A request filter, as described in
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter

        With one additional improvement:

        * project_id is optional if you have a project id defined
          in the connection
          See: :ref:`howto/connection:gcp`

    :type filter: dict
    :return: list of transfer operations
    :rtype: list[dict]
    """
    conn = self.get_conn()
    # Fill in the project id from the connection when the caller omitted it.
    filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
    request = conn.transferOperations().list(name=TRANSFER_OPERATIONS, filter=json.dumps(filter))
    operations = []
    # Walk every page of results; list_next() returns None after the last page.
    while request is not None:
        response = request.execute(num_retries=self.num_retries)
        # Pages without results omit the operations key entirely.
        operations.extend(response.get(OPERATIONS, []))
        request = conn.transferOperations().list_next(
            previous_request=request, previous_response=response
        )
    return operations
[python] Lists transfer operations in Google Storage Transfer Service that match the specified filter. :param filter: (Required) A request filter, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter With one additional improvement: * project_id is optional if you have a project id defined in the connection See: :ref:`howto/connection:gcp` :type filter: dict :return: list of transfer operations :rtype: list[dict]
def pause_transfer_operation(self, operation_name):
    """
    Pauses a transfer operation in Google Storage Transfer Service.

    :param operation_name: (Required) Name of the transfer operation.
    :type operation_name: str
    :rtype: None
    """
    conn = self.get_conn()
    request = conn.transferOperations().pause(name=operation_name)
    request.execute(num_retries=self.num_retries)
[python] Pauses a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None
def resume_transfer_operation(self, operation_name):
    """
    Resumes a transfer operation in Google Storage Transfer Service.

    :param operation_name: (Required) Name of the transfer operation.
    :type operation_name: str
    :rtype: None
    """
    conn = self.get_conn()
    request = conn.transferOperations().resume(name=operation_name)
    request.execute(num_retries=self.num_retries)
[python] Resumes a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60):
    """
    Waits until the job reaches the expected state.

    :param job: Transfer job
        See:
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
    :type job: dict
    :param expected_statuses: State that is expected
        See:
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
    :type expected_statuses: set[str]
    :param timeout: Time in which the operation must end, in seconds.
    :type timeout: int
    :rtype: None
    """
    remaining = timeout
    while remaining > 0:
        # Fetch every operation spawned for this job in its project.
        job_filter = {FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
        operations = self.list_transfer_operations(filter=job_filter)
        if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
            return
        # Poll at a fixed interval and count it against the remaining budget.
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
        remaining -= TIME_TO_SLEEP_IN_SECONDS
    raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
[python] Waits until the job reaches the expected state. :param job: Transfer job See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob :type job: dict :param expected_statuses: State that is expected See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status :type expected_statuses: set[str] :param timeout: Time in which the operation must end, in seconds. :type timeout: int :rtype: None
def operations_contain_expected_statuses(operations, expected_statuses):
    """
    Checks whether the operation list has an operation with one of the
    expected statuses; returns True if so. If it instead encounters an
    operation in a FAILED or ABORTED state, raises
    :class:`airflow.exceptions.AirflowException`.

    :param operations: (Required) List of transfer operations to check.
    :type operations: list[dict]
    :param expected_statuses: (Required) status that is expected
        See:
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
    :type expected_statuses: set[str]
    :return: True if an operation with an expected status is present,
        False otherwise.
    :raises: airflow.exceptions.AirflowException if an operation is in a
        FAILED or ABORTED state.
    :rtype: bool
    """
    # Accept either a single status string or any iterable of statuses.
    if isinstance(expected_statuses, six.string_types):
        expected_set = {expected_statuses}
    else:
        expected_set = set(expected_statuses)
    if not operations:
        return False
    current_statuses = {operation[METADATA][STATUS] for operation in operations}
    # An expected status anywhere in the list counts as success,
    # even if other operations are in negative states.
    if current_statuses & expected_set:
        return True
    if current_statuses & NEGATIVE_STATUSES:
        raise AirflowException(
            'An unexpected operation status was encountered. Expected: {}'.format(
                ", ".join(expected_set)
            )
        )
    return False
[python] Checks whether the operation list has an operation with one of the expected statuses; returns True if so. If it encounters operations in a FAILED or ABORTED state, it raises :class:`airflow.exceptions.AirflowException`. :param operations: (Required) List of transfer operations to check. :type operations: list[dict] :param expected_statuses: (Required) status that is expected See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status :type expected_statuses: set[str] :return: True if an operation with an expected status is present in the list, False otherwise. :raises: airflow.exceptions.AirflowException if an operation in the list is in a FAILED or ABORTED state. :rtype: bool