diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 46497eed8950c..ac87f68598992 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -242,7 +242,7 @@ repos:
         name: Run pydocstyle
         args:
           - --convention=pep257
-          - --add-ignore=D100,D102,D103,D104,D105,D107,D202,D205,D400,D401
+          - --add-ignore=D100,D102,D103,D104,D105,D107,D205,D400,D401
         exclude: |
           (?x)
           ^tests/.*\.py$|
diff --git a/airflow/cli/commands/dag_command.py b/airflow/cli/commands/dag_command.py
index a06c5a706907a..a219c94da900e 100644
--- a/airflow/cli/commands/dag_command.py
+++ b/airflow/cli/commands/dag_command.py
@@ -253,7 +253,6 @@ def dag_state(args, session=NEW_SESSION):
     >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000
     failed, {"name": "bob", "age": "42"}
     """
-
     dag = DagModel.get_dagmodel(args.dag_id, session=session)
 
     if not dag:
diff --git a/airflow/models/dag.py b/airflow/models/dag.py
index ece03bafe8c92..4e4278b2eeb06 100644
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -1976,7 +1976,6 @@ def partial_subset(
         :param include_direct_upstream: Include all tasks directly upstream of matched
             and downstream (if include_downstream = True) tasks
         """
-
         from airflow.models.baseoperator import BaseOperator
         from airflow.models.mappedoperator import MappedOperator
 
diff --git a/airflow/models/mappedoperator.py b/airflow/models/mappedoperator.py
index fe18a97cc1414..ba7328d2f7a33 100644
--- a/airflow/models/mappedoperator.py
+++ b/airflow/models/mappedoperator.py
@@ -843,7 +843,6 @@ def run_time_mapped_ti_count(self, run_id: str, *, session: Session) -> Optional
         :return: None if upstream tasks are not complete yet, or else total number of mapped
             TIs this task should have
         """
-
         lengths = self._get_map_lengths(run_id, session=session)
         expansion_kwargs = self._get_expansion_kwargs()
 
diff --git a/airflow/providers/amazon/aws/hooks/quicksight.py b/airflow/providers/amazon/aws/hooks/quicksight.py
index a7e90c36cf92a..a11ad2781b39d 100644
--- a/airflow/providers/amazon/aws/hooks/quicksight.py
+++ b/airflow/providers/amazon/aws/hooks/quicksight.py
@@ -72,7 +72,6 @@ def create_ingestion(
             having Ingestion ARN, HTTP status, ingestion ID and ingestion status.
         :rtype: Dict
         """
-
         self.log.info("Creating QuickSight Ingestion for data set id %s.", data_set_id)
         quicksight_client = self.get_conn()
         try:
@@ -136,7 +135,6 @@ def wait_for_state(
             will check the status of QuickSight Ingestion
         :return: response of describe_ingestion call after Ingestion is is done
         """
-
         sec = 0
         status = self.get_status(aws_account_id, data_set_id, ingestion_id)
         while status in self.NON_TERMINAL_STATES and status != target_state:
diff --git a/airflow/providers/amazon/aws/hooks/s3.py b/airflow/providers/amazon/aws/hooks/s3.py
index e7e9f2de508da..fd130a5bdd75c 100644
--- a/airflow/providers/amazon/aws/hooks/s3.py
+++ b/airflow/providers/amazon/aws/hooks/s3.py
@@ -162,7 +162,6 @@ def get_s3_bucket_key(
     :return: the parsed bucket name and key
     :rtype: tuple of str
     """
-
     if bucket is None:
         return S3Hook.parse_s3_url(key)
 
diff --git a/airflow/providers/amazon/aws/hooks/sts.py b/airflow/providers/amazon/aws/hooks/sts.py
index aff787ee5d70e..78ecad74d9f54 100644
--- a/airflow/providers/amazon/aws/hooks/sts.py
+++ b/airflow/providers/amazon/aws/hooks/sts.py
@@ -33,7 +33,6 @@ def __init__(self, *args, **kwargs):
 
     def get_account_number(self) -> str:
         """Get the account Number"""
-
         try:
             return self.get_conn().get_caller_identity()['Account']
         except Exception as general_error:
diff --git a/airflow/providers/amazon/aws/sensors/rds.py b/airflow/providers/amazon/aws/sensors/rds.py
index 54ee50875e90a..264f3e7fe1e3f 100644
--- a/airflow/providers/amazon/aws/sensors/rds.py
+++ b/airflow/providers/amazon/aws/sensors/rds.py
@@ -56,7 +56,6 @@ def _describe_item(self, item_type: str, item_name: str) -> list:
 
     def _check_item(self, item_type: str, item_name: str) -> bool:
         """Get certain item from `_describe_item()` and check its status"""
-
         try:
             items = self._describe_item(item_type, item_name)
         except ClientError:
diff --git a/airflow/providers/cncf/kubernetes/hooks/kubernetes.py b/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
index e15dce67ef40a..ad7762ed4fe3e 100644
--- a/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
+++ b/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
@@ -180,7 +180,6 @@ def _deprecation_warning_core_param(deprecation_warnings):
 
     def get_conn(self) -> Any:
         """Returns kubernetes api session for use with requests"""
-
         in_cluster = self._coalesce_param(
             self.in_cluster, self.conn_extras.get("extra__kubernetes__in_cluster") or None
         )
diff --git a/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py b/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py
index 056d6585ab58a..3b4366dca98c0 100644
--- a/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py
+++ b/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py
@@ -593,7 +593,6 @@ def _patch_deprecated_k8s_settings(self, hook: KubernetesHook):
         When we find values there that we need to apply on the hook, we patch special
         hook attributes here.
         """
-
         # default for enable_tcp_keepalive is True; patch if False
         if conf.getboolean('kubernetes', 'enable_tcp_keepalive') is False:
             hook._deprecated_core_disable_tcp_keepalive = True
diff --git a/airflow/providers/google/cloud/hooks/bigquery.py b/airflow/providers/google/cloud/hooks/bigquery.py
index d4f54f56cef09..70795efd6530d 100644
--- a/airflow/providers/google/cloud/hooks/bigquery.py
+++ b/airflow/providers/google/cloud/hooks/bigquery.py
@@ -361,7 +361,6 @@ def create_empty_table(
         :param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
         :return: Created table
         """
-
         _table_resource: Dict[str, Any] = {}
 
         if self.location:
diff --git a/airflow/providers/google/cloud/hooks/looker.py b/airflow/providers/google/cloud/hooks/looker.py
index 845211260bbbb..425b167956605 100644
--- a/airflow/providers/google/cloud/hooks/looker.py
+++ b/airflow/providers/google/cloud/hooks/looker.py
@@ -185,7 +185,6 @@ def wait_for_job(
 
     def get_looker_sdk(self):
         """Returns Looker SDK client for Looker API 4.0."""
-
         conn = self.get_connection(self.looker_conn_id)
         settings = LookerApiSettings(conn)
 
@@ -214,7 +213,6 @@ def read_config(self):
         Overrides the default logic of getting connection settings. Fetches
         the connection settings from Airflow's connection object.
         """
-
         config = {}
 
         if self.conn.host is None:
diff --git a/airflow/providers/hashicorp/secrets/vault.py b/airflow/providers/hashicorp/secrets/vault.py
index 4ff25d71b48e9..52b019eeac12e 100644
--- a/airflow/providers/hashicorp/secrets/vault.py
+++ b/airflow/providers/hashicorp/secrets/vault.py
@@ -175,7 +175,6 @@ def get_conn_uri(self, conn_id: str) -> Optional[str]:
         :rtype: str
         :return: The connection uri retrieved from the secret
         """
-
         # Since VaultBackend implements `get_connection`, `get_conn_uri` is not used. So we
         # don't need to implement (or direct users to use) method `get_conn_value` instead
         warnings.warn(
diff --git a/airflow/utils/db.py b/airflow/utils/db.py
index c3262318c1cec..e8790de823b67 100644
--- a/airflow/utils/db.py
+++ b/airflow/utils/db.py
@@ -861,7 +861,6 @@ def reflect_tables(tables: List[Union[Base, str]], session):
     This function gets the current state of each table in the set of models
     provided and returns a SqlAlchemy metadata object containing them.
     """
-
     import sqlalchemy.schema
 
     metadata = sqlalchemy.schema.MetaData(session.bind)
@@ -1173,7 +1172,6 @@ def _move_duplicate_data_to_new_table(
         building the DELETE FROM join condition.
     :param target_table_name: name of the table in which to park the duplicate rows
     """
-
     bind = session.get_bind()
     dialect_name = bind.dialect.name
     query = (
diff --git a/airflow/utils/process_utils.py b/airflow/utils/process_utils.py
index d547f2c0deb47..4ae1f4398005e 100644
--- a/airflow/utils/process_utils.py
+++ b/airflow/utils/process_utils.py
@@ -320,7 +320,6 @@ def set_new_process_group() -> None:
     rather than having to iterate the child processes.
     If current process spawn by system call ``exec()`` than keep current process group
     """
-
     if os.getpid() == os.getsid(0):
         # If PID = SID than process a session leader, and it is not possible to change process group
         return
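
For reference: the .pre-commit-config.yaml hunk above drops D202 ("No blank lines allowed after
function docstring") from pydocstyle's ignore list, and every other hunk deletes the blank line
that would now trip that rule. A minimal sketch of what D202 flags (the function names here are
hypothetical, not taken from the diff):

    def bad():
        """Summary line."""

        return 1  # D202: a blank line separates the docstring from the body

    def good():
        """Summary line."""
        return 1  # compliant: the body starts directly after the docstring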