[Linting] Backport Ruff upgrade [1.6.x] (mlrun#5253)
jond01 authored Mar 6, 2024
1 parent e45c26b commit e1db48c
Showing 93 changed files with 385 additions and 319 deletions.
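
Nearly every hunk below is the same mechanical change: Ruff 0.3 formats code to Black's 2024 style, which keeps a subscripted assignment target on a single line and parenthesizes the right-hand side when the statement overflows, instead of splitting the subscript across lines. A minimal before/after sketch (hypothetical names, not from the mlrun codebase):

    # Sketch of the Ruff 0.3 / Black 2024 style change; names are hypothetical.
    settings = {}
    some_sufficiently_long_value = "value"

    # Old style (ruff ~0.1.8): the subscript target is split across lines.
    settings[
        "a_long_configuration_key_name"
    ] = some_sufficiently_long_value

    # New style (ruff ~0.3.0): the target stays intact and the value is
    # wrapped in parentheses instead.
    settings["a_long_configuration_key_name"] = (
        some_sufficiently_long_value
    )
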
6 changes: 3 additions & 3 deletions automation/system_test/prepare.py
@@ -394,9 +394,9 @@ def _enrich_env(self):
         spark_service_name = self._get_service_name("app=spark,component=spark-master")
         self._env_config["MLRUN_IGUAZIO_API_URL"] = f"https://{api_url_host}"
         self._env_config["V3IO_FRAMESD"] = f"https://{framesd_host}"
-        self._env_config[
-            "MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"
-        ] = spark_service_name
+        self._env_config["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"] = (
+            spark_service_name
+        )
         self._env_config["V3IO_API"] = f"https://{v3io_api_host}"
         self._env_config["MLRUN_DBPATH"] = f"https://{mlrun_api_url}"

2 changes: 1 addition & 1 deletion dev-requirements.txt
@@ -3,7 +3,7 @@ twine~=3.1
 build~=1.0
 
 # formatting & linting
-ruff~=0.1.8
+ruff~=0.3.0
 import-linter~=1.8
 
 # testing

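With the pin bumped, the rest of the commit is what the new formatter produces — presumably a re-run of `ruff format .` (plus `ruff check --fix .` for autofixable lint rules) regenerated the 93 touched files.
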
8 changes: 4 additions & 4 deletions mlrun/config.py
@@ -960,10 +960,10 @@ def get_default_function_pod_resources(
             with_gpu = (
                 with_gpu_requests if requirement == "requests" else with_gpu_limits
             )
-            resources[
-                requirement
-            ] = self.get_default_function_pod_requirement_resources(
-                requirement, with_gpu
-            )
+            resources[requirement] = (
+                self.get_default_function_pod_requirement_resources(
+                    requirement, with_gpu
+                )
+            )
         return resources

18 changes: 9 additions & 9 deletions mlrun/datastore/azure_blob.py
@@ -175,9 +175,9 @@ def get_spark_options(self):
 
         if "client_secret" in st or "client_id" in st or "tenant_id" in st:
             res[f"spark.hadoop.fs.azure.account.auth.type.{host}"] = "OAuth"
-            res[
-                f"spark.hadoop.fs.azure.account.oauth.provider.type.{host}"
-            ] = "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"
+            res[f"spark.hadoop.fs.azure.account.oauth.provider.type.{host}"] = (
+                "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"
+            )
             if "client_id" in st:
                 res[f"spark.hadoop.fs.azure.account.oauth2.client.id.{host}"] = st[
                     "client_id"
@@ -188,14 +188,14 @@ def get_spark_options(self):
                 ]
             if "tenant_id" in st:
                 tenant_id = st["tenant_id"]
-                res[
-                    f"spark.hadoop.fs.azure.account.oauth2.client.endpoint.{host}"
-                ] = f"https://login.microsoftonline.com/{tenant_id}/oauth2/token"
+                res[f"spark.hadoop.fs.azure.account.oauth2.client.endpoint.{host}"] = (
+                    f"https://login.microsoftonline.com/{tenant_id}/oauth2/token"
+                )
 
         if "sas_token" in st:
             res[f"spark.hadoop.fs.azure.account.auth.type.{host}"] = "SAS"
-            res[
-                f"spark.hadoop.fs.azure.sas.token.provider.type.{host}"
-            ] = "org.apache.hadoop.fs.azurebfs.sas.FixedSASTokenProvider"
+            res[f"spark.hadoop.fs.azure.sas.token.provider.type.{host}"] = (
+                "org.apache.hadoop.fs.azurebfs.sas.FixedSASTokenProvider"
+            )
             res[f"spark.hadoop.fs.azure.sas.fixed.token.{host}"] = st["sas_token"]
         return res

12 changes: 6 additions & 6 deletions mlrun/datastore/google_cloud_storage.py
@@ -147,13 +147,13 @@ def get_spark_options(self):
             if "project_id" in credentials:
                 res["spark.hadoop.fs.gs.project.id"] = credentials["project_id"]
             if "private_key_id" in credentials:
-                res[
-                    "spark.hadoop.fs.gs.auth.service.account.private.key.id"
-                ] = credentials["private_key_id"]
+                res["spark.hadoop.fs.gs.auth.service.account.private.key.id"] = (
+                    credentials["private_key_id"]
+                )
             if "private_key" in credentials:
-                res[
-                    "spark.hadoop.fs.gs.auth.service.account.private.key"
-                ] = credentials["private_key"]
+                res["spark.hadoop.fs.gs.auth.service.account.private.key"] = (
+                    credentials["private_key"]
+                )
             if "client_email" in credentials:
                 res["spark.hadoop.fs.gs.auth.service.account.email"] = credentials[
                     "client_email"

18 changes: 9 additions & 9 deletions mlrun/db/httpdb.py
@@ -1137,17 +1137,17 @@ def list_runtime_resources(
             structured_dict = {}
             for project, job_runtime_resources_map in response.json().items():
                 for job_id, runtime_resources in job_runtime_resources_map.items():
-                    structured_dict.setdefault(project, {})[
-                        job_id
-                    ] = mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    structured_dict.setdefault(project, {})[job_id] = (
+                        mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    )
             return structured_dict
         elif group_by == mlrun.common.schemas.ListRuntimeResourcesGroupByField.project:
             structured_dict = {}
             for project, kind_runtime_resources_map in response.json().items():
                 for kind, runtime_resources in kind_runtime_resources_map.items():
-                    structured_dict.setdefault(project, {})[
-                        kind
-                    ] = mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    structured_dict.setdefault(project, {})[kind] = (
+                        mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    )
             return structured_dict
         else:
             raise NotImplementedError(
@@ -1206,9 +1206,9 @@ def delete_runtime_resources(
             structured_dict = {}
             for project, kind_runtime_resources_map in response.json().items():
                 for kind, runtime_resources in kind_runtime_resources_map.items():
-                    structured_dict.setdefault(project, {})[
-                        kind
-                    ] = mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    structured_dict.setdefault(project, {})[kind] = (
+                        mlrun.common.schemas.RuntimeResources(**runtime_resources)
+                    )
             return structured_dict
 
     def create_schedule(

6 changes: 3 additions & 3 deletions mlrun/execution.py
@@ -559,9 +559,9 @@ def log_iteration_results(self, best, summary: list, task: dict, commit=False):
             for k, v in get_in(task, ["status", "results"], {}).items():
                 self._results[k] = v
             for artifact in get_in(task, ["status", run_keys.artifacts], []):
-                self._artifacts_manager.artifacts[
-                    artifact["metadata"]["key"]
-                ] = artifact
+                self._artifacts_manager.artifacts[artifact["metadata"]["key"]] = (
+                    artifact
+                )
                 self._artifacts_manager.link_artifact(
                     self.project,
                     self.name,

6 changes: 3 additions & 3 deletions mlrun/frameworks/tf_keras/callbacks/logging_callback.py
@@ -389,9 +389,9 @@ def _add_auto_hyperparameters(self):
         ):
             try:
                 self._get_hyperparameter(key_chain=learning_rate_key_chain)
-                self._dynamic_hyperparameters_keys[
-                    learning_rate_key
-                ] = learning_rate_key_chain
+                self._dynamic_hyperparameters_keys[learning_rate_key] = (
+                    learning_rate_key_chain
+                )
             except (KeyError, IndexError, ValueError):
                 pass
 

14 changes: 7 additions & 7 deletions mlrun/frameworks/tf_keras/model_handler.py
@@ -263,13 +263,13 @@ def save(
         # Update the paths and log artifacts if context is available:
         if self._weights_file is not None:
             if self._context is not None:
-                artifacts[
-                    self._get_weights_file_artifact_name()
-                ] = self._context.log_artifact(
-                    self._weights_file,
-                    local_path=self._weights_file,
-                    artifact_path=output_path,
-                    db_key=False,
-                )
+                artifacts[self._get_weights_file_artifact_name()] = (
+                    self._context.log_artifact(
+                        self._weights_file,
+                        local_path=self._weights_file,
+                        artifact_path=output_path,
+                        db_key=False,
+                    )
+                )
 
         return artifacts if self._context is not None else None

6 changes: 3 additions & 3 deletions mlrun/kfpops.py
@@ -408,9 +408,9 @@ def mlrun_pipeline(
         cmd += ["--label", f"{label}={val}"]
     for output in outputs:
         cmd += ["-o", str(output)]
-        file_outputs[
-            output.replace(".", "_")
-        ] = f"/tmp/{output}"  # not using path.join to avoid windows "\"
+        file_outputs[output.replace(".", "_")] = (
+            f"/tmp/{output}"  # not using path.join to avoid windows "\"
+        )
     if project:
         cmd += ["--project", project]
     if handler:

16 changes: 8 additions & 8 deletions mlrun/model_monitoring/api.py
@@ -436,9 +436,9 @@ def _generate_model_endpoint(
         ] = possible_drift_threshold
 
     model_endpoint.spec.monitoring_mode = monitoring_mode
-    model_endpoint.status.first_request = (
-        model_endpoint.status.last_request
-    ) = datetime_now().isoformat()
+    model_endpoint.status.first_request = model_endpoint.status.last_request = (
+        datetime_now().isoformat()
+    )
     if sample_set_statistics:
         model_endpoint.status.feature_stats = sample_set_statistics
 
@@ -476,11 +476,11 @@ def trigger_drift_batch_job(
     db_session = mlrun.get_run_db()
 
     # Register the monitoring batch job (do nothing if already exist) and get the job function as a dictionary
-    batch_function_dict: typing.Dict[
-        str, typing.Any
-    ] = db_session.deploy_monitoring_batch_job(
-        project=project,
-        default_batch_image=default_batch_image,
-    )
+    batch_function_dict: typing.Dict[str, typing.Any] = (
+        db_session.deploy_monitoring_batch_job(
+            project=project,
+            default_batch_image=default_batch_image,
+        )
+    )
 
     # Prepare current run params

26 changes: 13 additions & 13 deletions mlrun/model_monitoring/stores/kv_model_endpoint_store.py
@@ -540,24 +540,24 @@ def validate_old_schema_fields(endpoint: dict):
             and endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.METRICS]
             == "null"
         ):
-            endpoint[
-                mlrun.common.schemas.model_monitoring.EventFieldType.METRICS
-            ] = json.dumps(
-                {
-                    mlrun.common.schemas.model_monitoring.EventKeyMetrics.GENERIC: {
-                        mlrun.common.schemas.model_monitoring.EventLiveStats.LATENCY_AVG_1H: 0,
-                        mlrun.common.schemas.model_monitoring.EventLiveStats.PREDICTIONS_PER_SECOND: 0,
-                    }
-                }
-            )
+            endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.METRICS] = (
+                json.dumps(
+                    {
+                        mlrun.common.schemas.model_monitoring.EventKeyMetrics.GENERIC: {
+                            mlrun.common.schemas.model_monitoring.EventLiveStats.LATENCY_AVG_1H: 0,
+                            mlrun.common.schemas.model_monitoring.EventLiveStats.PREDICTIONS_PER_SECOND: 0,
+                        }
+                    }
+                )
+            )
         # Validate key `uid` instead of `endpoint_id`
        # For backwards compatibility reasons, we replace the `endpoint_id` with `uid` which is the updated key name
         if mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID in endpoint:
-            endpoint[
-                mlrun.common.schemas.model_monitoring.EventFieldType.UID
-            ] = endpoint[
-                mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID
-            ]
+            endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.UID] = (
+                endpoint[
+                    mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID
+                ]
+            )
 
     @staticmethod
     def _encode_field(field: typing.Union[str, bytes]) -> bytes:

1 change: 0 additions & 1 deletion mlrun/model_monitoring/stores/sql_model_endpoint_store.py
@@ -31,7 +31,6 @@
 
 
 class SQLModelEndpointStore(ModelEndpointStore):
-
     """
     Handles the DB operations when the DB target is from type SQL. For the SQL operations, we use SQLAlchemy, a Python
     SQL toolkit that handles the communication with the database. When using SQL for storing the model endpoints

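This single deletion reflects another rule of the Black 2024 style that Ruff 0.3 applies, as I understand it: a blank line between a class statement and its docstring is removed. A small sketch with hypothetical classes:

    # Before: a blank line separates the class statement from its docstring.
    class OldStyle:

        """Example docstring."""

    # After formatting with ruff ~0.3.0: the blank line is dropped.
    class NewStyle:
        """Example docstring."""
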
6 changes: 3 additions & 3 deletions mlrun/package/packagers/pandas_packagers.py
@@ -838,9 +838,9 @@ def _prepare_result(obj: Union[list, dict, tuple]) -> Any:
         """
         if isinstance(obj, dict):
             for key, value in obj.items():
-                obj[
-                    PandasDataFramePackager._prepare_result(obj=key)
-                ] = PandasDataFramePackager._prepare_result(obj=value)
+                obj[PandasDataFramePackager._prepare_result(obj=key)] = (
+                    PandasDataFramePackager._prepare_result(obj=value)
+                )
         elif isinstance(obj, list):
             for i, value in enumerate(obj):
                 obj[i] = PandasDataFramePackager._prepare_result(obj=value)

18 changes: 9 additions & 9 deletions mlrun/runtimes/function.py
@@ -432,15 +432,15 @@ def with_http(
                 raise ValueError(
                     "gateway timeout must be greater than the worker timeout"
                 )
-            annotations[
-                "nginx.ingress.kubernetes.io/proxy-connect-timeout"
-            ] = f"{gateway_timeout}"
-            annotations[
-                "nginx.ingress.kubernetes.io/proxy-read-timeout"
-            ] = f"{gateway_timeout}"
-            annotations[
-                "nginx.ingress.kubernetes.io/proxy-send-timeout"
-            ] = f"{gateway_timeout}"
+            annotations["nginx.ingress.kubernetes.io/proxy-connect-timeout"] = (
+                f"{gateway_timeout}"
+            )
+            annotations["nginx.ingress.kubernetes.io/proxy-read-timeout"] = (
+                f"{gateway_timeout}"
+            )
+            annotations["nginx.ingress.kubernetes.io/proxy-send-timeout"] = (
+                f"{gateway_timeout}"
+            )
 
         trigger = nuclio.HttpTrigger(
             workers=workers,

12 changes: 6 additions & 6 deletions mlrun/runtimes/mpijob/abstract.py
@@ -196,13 +196,13 @@ def with_autotune(
         if steps_per_sample is not None:
             horovod_autotune_settings["autotune-steps-per-sample"] = steps_per_sample
         if bayes_opt_max_samples is not None:
-            horovod_autotune_settings[
-                "autotune-bayes-opt-max-samples"
-            ] = bayes_opt_max_samples
+            horovod_autotune_settings["autotune-bayes-opt-max-samples"] = (
+                bayes_opt_max_samples
+            )
         if gaussian_process_noise is not None:
-            horovod_autotune_settings[
-                "autotune-gaussian-process-noise"
-            ] = gaussian_process_noise
+            horovod_autotune_settings["autotune-gaussian-process-noise"] = (
+                gaussian_process_noise
+            )
 
         self.set_envs(horovod_autotune_settings)
 

6 changes: 3 additions & 3 deletions mlrun/runtimes/pod.py
@@ -430,9 +430,9 @@ def enrich_resources_with_default_pod_resources(
                     )
                     is None
                 ):
-                    resources[resource_requirement][
-                        resource_type
-                    ] = default_resources[resource_requirement][resource_type]
+                    resources[resource_requirement][resource_type] = (
+                        default_resources[resource_requirement][resource_type]
+                    )
         # This enables the user to define that no defaults would be applied on the resources
         elif resources == {}:
             return resources

6 changes: 3 additions & 3 deletions mlrun/runtimes/serving.py
@@ -523,9 +523,9 @@ def _deploy_function_refs(self, builder_env: dict = None):
             function_object.metadata.tag = self.metadata.tag
 
             function_object.metadata.labels = function_object.metadata.labels or {}
-            function_object.metadata.labels[
-                "mlrun/parent-function"
-            ] = self.metadata.name
+            function_object.metadata.labels["mlrun/parent-function"] = (
+                self.metadata.name
+            )
             function_object._is_child_function = True
             if not function_object.spec.graph:
                 # copy the current graph only if the child doesnt have a graph of his own

6 changes: 3 additions & 3 deletions mlrun/runtimes/sparkjob/spark3job.py
@@ -345,9 +345,9 @@ def enrich_resources_with_default_pod_resources(
                     )
                     is None
                 ):
-                    resources[resource_requirement][
-                        resource_type
-                    ] = default_resources[resource_requirement][resource_type]
+                    resources[resource_requirement][resource_type] = (
+                        default_resources[resource_requirement][resource_type]
+                    )
         else:
             resources = default_resources
 

6 changes: 3 additions & 3 deletions mlrun/utils/async_http.py
@@ -139,9 +139,9 @@ async def _do_request(self) -> aiohttp.ClientResponse:
 
             # enrich user agent
             # will help traceability and debugging
-            headers[
-                aiohttp.hdrs.USER_AGENT
-            ] = f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}"
+            headers[aiohttp.hdrs.USER_AGENT] = (
+                f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}"
+            )
 
             response: typing.Optional[
                 aiohttp.ClientResponse

6 changes: 3 additions & 3 deletions mlrun/utils/http.py
@@ -110,9 +110,9 @@ def __init__(
     def request(self, method, url, **kwargs):
        retry_count = 0
         kwargs.setdefault("headers", {})
-        kwargs["headers"][
-            "User-Agent"
-        ] = f"{requests.utils.default_user_agent()} mlrun/{config.version}"
+        kwargs["headers"]["User-Agent"] = (
+            f"{requests.utils.default_user_agent()} mlrun/{config.version}"
+        )
         while True:
             try:
                 response = super().request(method, url, **kwargs)