From 0159f474c6bbc15f20d52bc946bd252bd852b196 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 30 Dec 2025 09:11:27 +0500 Subject: [PATCH 01/54] set up folder structure and base code --- openml/_api/__init__.py | 8 +++ openml/_api/config.py | 5 ++ openml/_api/http/__init__.py | 1 + openml/_api/http/client.py | 23 ++++++ openml/_api/http/utils.py | 0 openml/_api/resources/__init__.py | 2 + openml/_api/resources/base.py | 22 ++++++ openml/_api/resources/datasets.py | 13 ++++ openml/_api/resources/tasks.py | 113 ++++++++++++++++++++++++++++++ openml/_api/runtime/core.py | 58 +++++++++++++++ openml/_api/runtime/fallback.py | 5 ++ openml/tasks/functions.py | 8 ++- 12 files changed, 255 insertions(+), 3 deletions(-) create mode 100644 openml/_api/__init__.py create mode 100644 openml/_api/config.py create mode 100644 openml/_api/http/__init__.py create mode 100644 openml/_api/http/client.py create mode 100644 openml/_api/http/utils.py create mode 100644 openml/_api/resources/__init__.py create mode 100644 openml/_api/resources/base.py create mode 100644 openml/_api/resources/datasets.py create mode 100644 openml/_api/resources/tasks.py create mode 100644 openml/_api/runtime/core.py create mode 100644 openml/_api/runtime/fallback.py diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py new file mode 100644 index 000000000..5089f94dd --- /dev/null +++ b/openml/_api/__init__.py @@ -0,0 +1,8 @@ +from openml._api.runtime.core import APIContext + + +def set_api_version(version: str, strict=False): + api_context.set_version(version=version, strict=strict) + + +api_context = APIContext() diff --git a/openml/_api/config.py b/openml/_api/config.py new file mode 100644 index 000000000..bd93c3cad --- /dev/null +++ b/openml/_api/config.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +API_V1_SERVER = "https://www.openml.org/api/v1/xml" +API_V2_SERVER = "http://127.0.0.1:8001" +API_KEY = "..." 
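The entry point and configuration above are easiest to read through a short usage sketch. The lines below are illustrative only and not part of the patch: the task id 31 is an arbitrary example, and at this stage the v2 resources are still stubs wired to the local development server from config.py.

from openml._api import api_context, set_api_version

# APIContext() builds the default backend via build_backend("v1", strict=False),
# so this call goes through TasksV1 against API_V1_SERVER.
task = api_context.backend.tasks.get(31)

# Rebuild the backend for the v2 server; with strict=True build_backend returns
# the plain v2 backend, otherwise it wraps the v2 resources in FallbackProxy
# with the v1 resources as fallback.
set_api_version("v2", strict=True)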
diff --git a/openml/_api/http/__init__.py b/openml/_api/http/__init__.py new file mode 100644 index 000000000..fde2a5b0a --- /dev/null +++ b/openml/_api/http/__init__.py @@ -0,0 +1 @@ +from openml._api.http.client import HTTPClient diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py new file mode 100644 index 000000000..81a9213e3 --- /dev/null +++ b/openml/_api/http/client.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import requests + +from openml.__version__ import __version__ + + +class HTTPClient: + def __init__(self, base_url: str): + self.base_url = base_url + self.headers = {"user-agent": f"openml-python/{__version__}"} + + def get(self, path, params=None): + url = f"{self.base_url}/{path}" + return requests.get(url, params=params, headers=self.headers) + + def post(self, path, data=None, files=None): + url = f"{self.base_url}/{path}" + return requests.post(url, data=data, files=files, headers=self.headers) + + def delete(self, path, params=None): + url = f"{self.base_url}/{path}" + return requests.delete(url, params=params, headers=self.headers) diff --git a/openml/_api/http/utils.py b/openml/_api/http/utils.py new file mode 100644 index 000000000..e69de29bb diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py new file mode 100644 index 000000000..078fc5998 --- /dev/null +++ b/openml/_api/resources/__init__.py @@ -0,0 +1,2 @@ +from openml._api.resources.datasets import DatasetsV1, DatasetsV2 +from openml._api.resources.tasks import TasksV1, TasksV2 diff --git a/openml/_api/resources/base.py b/openml/_api/resources/base.py new file mode 100644 index 000000000..1fae27665 --- /dev/null +++ b/openml/_api/resources/base.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from openml._api.http import HTTPClient + + +class ResourceAPI: + def __init__(self, http: HTTPClient): + self._http = http + + +class DatasetsAPI(ResourceAPI, ABC): + @abstractmethod + def get(self, id: int) -> dict: ... + + +class TasksAPI(ResourceAPI, ABC): + @abstractmethod + def get(self, id: int) -> dict: ... diff --git a/openml/_api/resources/datasets.py b/openml/_api/resources/datasets.py new file mode 100644 index 000000000..cd1bb595a --- /dev/null +++ b/openml/_api/resources/datasets.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from openml._api.resources.base import DatasetsAPI + + +class DatasetsV1(DatasetsAPI): + def get(self, id): + pass + + +class DatasetsV2(DatasetsAPI): + def get(self, id): + pass diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py new file mode 100644 index 000000000..b0e9afbf8 --- /dev/null +++ b/openml/_api/resources/tasks.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +import xmltodict + +from openml._api.resources.base import TasksAPI +from openml.tasks.task import ( + OpenMLClassificationTask, + OpenMLClusteringTask, + OpenMLLearningCurveTask, + OpenMLRegressionTask, + OpenMLTask, + TaskType, +) + + +class TasksV1(TasksAPI): + def get(self, id, return_response=False): + path = f"task/{id}" + response = self._http.get(path) + xml_content = response.content + task = self._create_task_from_xml(xml_content) + + if return_response: + return task, response + + return task + + def _create_task_from_xml(self, xml: str) -> OpenMLTask: + """Create a task given a xml string. + + Parameters + ---------- + xml : string + Task xml representation. 
+ + Returns + ------- + OpenMLTask + """ + dic = xmltodict.parse(xml)["oml:task"] + estimation_parameters = {} + inputs = {} + # Due to the unordered structure we obtain, we first have to extract + # the possible keys of oml:input; dic["oml:input"] is a list of + # OrderedDicts + + # Check if there is a list of inputs + if isinstance(dic["oml:input"], list): + for input_ in dic["oml:input"]: + name = input_["@name"] + inputs[name] = input_ + # Single input case + elif isinstance(dic["oml:input"], dict): + name = dic["oml:input"]["@name"] + inputs[name] = dic["oml:input"] + + evaluation_measures = None + if "evaluation_measures" in inputs: + evaluation_measures = inputs["evaluation_measures"]["oml:evaluation_measures"][ + "oml:evaluation_measure" + ] + + task_type = TaskType(int(dic["oml:task_type_id"])) + common_kwargs = { + "task_id": dic["oml:task_id"], + "task_type": dic["oml:task_type"], + "task_type_id": task_type, + "data_set_id": inputs["source_data"]["oml:data_set"]["oml:data_set_id"], + "evaluation_measure": evaluation_measures, + } + # TODO: add OpenMLClusteringTask? + if task_type in ( + TaskType.SUPERVISED_CLASSIFICATION, + TaskType.SUPERVISED_REGRESSION, + TaskType.LEARNING_CURVE, + ): + # Convert some more parameters + for parameter in inputs["estimation_procedure"]["oml:estimation_procedure"][ + "oml:parameter" + ]: + name = parameter["@name"] + text = parameter.get("#text", "") + estimation_parameters[name] = text + + common_kwargs["estimation_procedure_type"] = inputs["estimation_procedure"][ + "oml:estimation_procedure" + ]["oml:type"] + common_kwargs["estimation_procedure_id"] = int( + inputs["estimation_procedure"]["oml:estimation_procedure"]["oml:id"] + ) + + common_kwargs["estimation_parameters"] = estimation_parameters + common_kwargs["target_name"] = inputs["source_data"]["oml:data_set"][ + "oml:target_feature" + ] + common_kwargs["data_splits_url"] = inputs["estimation_procedure"][ + "oml:estimation_procedure" + ]["oml:data_splits_url"] + + cls = { + TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask, + TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask, + TaskType.CLUSTERING: OpenMLClusteringTask, + TaskType.LEARNING_CURVE: OpenMLLearningCurveTask, + }.get(task_type) + if cls is None: + raise NotImplementedError(f"Task type {common_kwargs['task_type']} not supported.") + return cls(**common_kwargs) # type: ignore + + +class TasksV2(TasksAPI): + def get(self, id): + pass diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py new file mode 100644 index 000000000..80f35587c --- /dev/null +++ b/openml/_api/runtime/core.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from openml._api.config import ( + API_V1_SERVER, + API_V2_SERVER, +) +from openml._api.http.client import HTTPClient +from openml._api.resources import ( + DatasetsV1, + DatasetsV2, + TasksV1, + TasksV2, +) +from openml._api.runtime.fallback import FallbackProxy + + +class APIBackend: + def __init__(self, *, datasets, tasks): + self.datasets = datasets + self.tasks = tasks + + +def build_backend(version: str, strict: bool) -> APIBackend: + v1_http = HTTPClient(API_V1_SERVER) + v2_http = HTTPClient(API_V2_SERVER) + + v1 = APIBackend( + datasets=DatasetsV1(v1_http), + tasks=TasksV1(v1_http), + ) + + if version == "v1": + return v1 + + v2 = APIBackend( + datasets=DatasetsV2(v2_http), + tasks=TasksV2(v2_http), + ) + + if strict: + return v2 + + return APIBackend( + datasets=FallbackProxy(v2.datasets, v1.datasets), + tasks=FallbackProxy(v2.tasks, v1.tasks), + ) + + +class 
APIContext: + def __init__(self): + self._backend = build_backend("v1", strict=False) + + def set_version(self, version: str, strict: bool = False): + self._backend = build_backend(version, strict) + + @property + def backend(self): + return self._backend diff --git a/openml/_api/runtime/fallback.py b/openml/_api/runtime/fallback.py new file mode 100644 index 000000000..56e96a966 --- /dev/null +++ b/openml/_api/runtime/fallback.py @@ -0,0 +1,5 @@ +from __future__ import annotations + + +class FallbackProxy: + pass diff --git a/openml/tasks/functions.py b/openml/tasks/functions.py index d2bf5e946..91be65965 100644 --- a/openml/tasks/functions.py +++ b/openml/tasks/functions.py @@ -12,6 +12,7 @@ import openml._api_calls import openml.utils +from openml._api import api_context from openml.datasets import get_dataset from openml.exceptions import OpenMLCacheException @@ -442,11 +443,12 @@ def _get_task_description(task_id: int) -> OpenMLTask: except OpenMLCacheException: _cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id) xml_file = _cache_dir / "task.xml" - task_xml = openml._api_calls._perform_api_call("task/%d" % task_id, "get") + task, response = api_context.backend.tasks.get(task_id, return_response=True) with xml_file.open("w", encoding="utf8") as fh: - fh.write(task_xml) - return _create_task_from_xml(task_xml) + fh.write(response.text) + + return task def _create_task_from_xml(xml: str) -> OpenMLTask: From 52ef37999fad8509e5e85b8512e442bd9dc69e04 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 5 Jan 2026 12:48:58 +0500 Subject: [PATCH 02/54] fix pre-commit --- openml/_api/__init__.py | 2 +- openml/_api/http/__init__.py | 2 ++ openml/_api/http/client.py | 32 +++++++++++++++++++++++-------- openml/_api/resources/__init__.py | 2 ++ openml/_api/resources/base.py | 13 +++++++++++-- openml/_api/resources/datasets.py | 15 +++++++++++---- openml/_api/resources/tasks.py | 25 +++++++++++++++++++----- openml/_api/runtime/__init__.py | 0 openml/_api/runtime/core.py | 23 +++++++++++----------- openml/_api/runtime/fallback.py | 9 ++++++++- openml/tasks/functions.py | 12 ++++++++---- 11 files changed, 99 insertions(+), 36 deletions(-) create mode 100644 openml/_api/runtime/__init__.py diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py index 5089f94dd..881f40671 100644 --- a/openml/_api/__init__.py +++ b/openml/_api/__init__.py @@ -1,7 +1,7 @@ from openml._api.runtime.core import APIContext -def set_api_version(version: str, strict=False): +def set_api_version(version: str, *, strict: bool = False) -> None: api_context.set_version(version=version, strict=strict) diff --git a/openml/_api/http/__init__.py b/openml/_api/http/__init__.py index fde2a5b0a..8e6d1e4ce 100644 --- a/openml/_api/http/__init__.py +++ b/openml/_api/http/__init__.py @@ -1 +1,3 @@ from openml._api.http.client import HTTPClient + +__all__ = ["HTTPClient"] diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py index 81a9213e3..dea5de809 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/http/client.py @@ -1,23 +1,39 @@ from __future__ import annotations +from typing import Any, Mapping + import requests +from requests import Response from openml.__version__ import __version__ class HTTPClient: - def __init__(self, base_url: str): + def __init__(self, base_url: str) -> None: self.base_url = base_url - self.headers = {"user-agent": f"openml-python/{__version__}"} + self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} - def get(self, path, 
params=None): + def get( + self, + path: str, + params: Mapping[str, Any] | None = None, + ) -> Response: url = f"{self.base_url}/{path}" - return requests.get(url, params=params, headers=self.headers) + return requests.get(url, params=params, headers=self.headers, timeout=10) - def post(self, path, data=None, files=None): + def post( + self, + path: str, + data: Mapping[str, Any] | None = None, + files: Any = None, + ) -> Response: url = f"{self.base_url}/{path}" - return requests.post(url, data=data, files=files, headers=self.headers) + return requests.post(url, data=data, files=files, headers=self.headers, timeout=10) - def delete(self, path, params=None): + def delete( + self, + path: str, + params: Mapping[str, Any] | None = None, + ) -> Response: url = f"{self.base_url}/{path}" - return requests.delete(url, params=params, headers=self.headers) + return requests.delete(url, params=params, headers=self.headers, timeout=10) diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py index 078fc5998..b1af3c1a8 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,2 +1,4 @@ from openml._api.resources.datasets import DatasetsV1, DatasetsV2 from openml._api.resources.tasks import TasksV1, TasksV2 + +__all__ = ["DatasetsV1", "DatasetsV2", "TasksV1", "TasksV2"] diff --git a/openml/_api/resources/base.py b/openml/_api/resources/base.py index 1fae27665..6fbf8977d 100644 --- a/openml/_api/resources/base.py +++ b/openml/_api/resources/base.py @@ -4,7 +4,11 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: + from requests import Response + from openml._api.http import HTTPClient + from openml.datasets.dataset import OpenMLDataset + from openml.tasks.task import OpenMLTask class ResourceAPI: @@ -14,9 +18,14 @@ def __init__(self, http: HTTPClient): class DatasetsAPI(ResourceAPI, ABC): @abstractmethod - def get(self, id: int) -> dict: ... + def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: ... class TasksAPI(ResourceAPI, ABC): @abstractmethod - def get(self, id: int) -> dict: ... + def get( + self, + task_id: int, + *, + return_response: bool = False, + ) -> OpenMLTask | tuple[OpenMLTask, Response]: ... 
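Because get() may return either the parsed object or an (object, Response) tuple, callers have to branch on the result; the _get_task_description change later in this patch does exactly that. A minimal sketch, not part of the patch, with task_id as a placeholder:

result = api_context.backend.tasks.get(task_id, return_response=True)

if isinstance(result, tuple):
    # v1 returns the task together with the raw requests.Response,
    # whose text can then be written to the task.xml cache file
    task, response = result
else:
    # an implementation may also return just the parsed task
    task = result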
diff --git a/openml/_api/resources/datasets.py b/openml/_api/resources/datasets.py index cd1bb595a..9ff1ec278 100644 --- a/openml/_api/resources/datasets.py +++ b/openml/_api/resources/datasets.py @@ -1,13 +1,20 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from openml._api.resources.base import DatasetsAPI +if TYPE_CHECKING: + from responses import Response + + from openml.datasets.dataset import OpenMLDataset + class DatasetsV1(DatasetsAPI): - def get(self, id): - pass + def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: + raise NotImplementedError class DatasetsV2(DatasetsAPI): - def get(self, id): - pass + def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: + raise NotImplementedError diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py index b0e9afbf8..f494fb9a3 100644 --- a/openml/_api/resources/tasks.py +++ b/openml/_api/resources/tasks.py @@ -1,5 +1,7 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import xmltodict from openml._api.resources.base import TasksAPI @@ -12,12 +14,20 @@ TaskType, ) +if TYPE_CHECKING: + from requests import Response + class TasksV1(TasksAPI): - def get(self, id, return_response=False): - path = f"task/{id}" + def get( + self, + task_id: int, + *, + return_response: bool = False, + ) -> OpenMLTask | tuple[OpenMLTask, Response]: + path = f"task/{task_id}" response = self._http.get(path) - xml_content = response.content + xml_content = response.text task = self._create_task_from_xml(xml_content) if return_response: @@ -109,5 +119,10 @@ def _create_task_from_xml(self, xml: str) -> OpenMLTask: class TasksV2(TasksAPI): - def get(self, id): - pass + def get( + self, + task_id: int, + *, + return_response: bool = False, + ) -> OpenMLTask | tuple[OpenMLTask, Response]: + raise NotImplementedError diff --git a/openml/_api/runtime/__init__.py b/openml/_api/runtime/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 80f35587c..aa09a69db 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -1,5 +1,7 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from openml._api.config import ( API_V1_SERVER, API_V2_SERVER, @@ -11,16 +13,18 @@ TasksV1, TasksV2, ) -from openml._api.runtime.fallback import FallbackProxy + +if TYPE_CHECKING: + from openml._api.resources.base import DatasetsAPI, TasksAPI class APIBackend: - def __init__(self, *, datasets, tasks): + def __init__(self, *, datasets: DatasetsAPI, tasks: TasksAPI): self.datasets = datasets self.tasks = tasks -def build_backend(version: str, strict: bool) -> APIBackend: +def build_backend(version: str, *, strict: bool) -> APIBackend: v1_http = HTTPClient(API_V1_SERVER) v2_http = HTTPClient(API_V2_SERVER) @@ -40,19 +44,16 @@ def build_backend(version: str, strict: bool) -> APIBackend: if strict: return v2 - return APIBackend( - datasets=FallbackProxy(v2.datasets, v1.datasets), - tasks=FallbackProxy(v2.tasks, v1.tasks), - ) + return v1 class APIContext: - def __init__(self): + def __init__(self) -> None: self._backend = build_backend("v1", strict=False) - def set_version(self, version: str, strict: bool = False): - self._backend = build_backend(version, strict) + def set_version(self, version: str, *, strict: bool = False) -> None: + self._backend = build_backend(version=version, strict=strict) @property - def backend(self): + def backend(self) -> 
APIBackend: return self._backend diff --git a/openml/_api/runtime/fallback.py b/openml/_api/runtime/fallback.py index 56e96a966..1bc99d270 100644 --- a/openml/_api/runtime/fallback.py +++ b/openml/_api/runtime/fallback.py @@ -1,5 +1,12 @@ from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from openml._api.resources.base import ResourceAPI + class FallbackProxy: - pass + def __init__(self, primary: ResourceAPI, fallback: ResourceAPI): + self._primary = primary + self._fallback = fallback diff --git a/openml/tasks/functions.py b/openml/tasks/functions.py index ef67f75bf..a794ad56d 100644 --- a/openml/tasks/functions.py +++ b/openml/tasks/functions.py @@ -445,10 +445,14 @@ def _get_task_description(task_id: int) -> OpenMLTask: except OpenMLCacheException: _cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id) xml_file = _cache_dir / "task.xml" - task, response = api_context.backend.tasks.get(task_id, return_response=True) - - with xml_file.open("w", encoding="utf8") as fh: - fh.write(response.text) + result = api_context.backend.tasks.get(task_id, return_response=True) + + if isinstance(result, tuple): + task, response = result + with xml_file.open("w", encoding="utf8") as fh: + fh.write(response.text) + else: + task = result return task From 5dfcbce55a027d19cd502ea7bb3d521c2b1bca29 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 7 Jan 2026 22:14:31 +0500 Subject: [PATCH 03/54] refactor --- openml/_api/config.py | 62 +++++++++++++++++++++++++++++++++++-- openml/_api/http/client.py | 18 +++++++---- openml/_api/runtime/core.py | 9 ++---- 3 files changed, 74 insertions(+), 15 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index bd93c3cad..1431f66b1 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -1,5 +1,61 @@ from __future__ import annotations -API_V1_SERVER = "https://www.openml.org/api/v1/xml" -API_V2_SERVER = "http://127.0.0.1:8001" -API_KEY = "..." 
+from dataclasses import dataclass +from typing import Literal + +DelayMethod = Literal["human", "robot"] + + +@dataclass +class APIConfig: + server: str + base_url: str + key: str + + +@dataclass +class APISettings: + v1: APIConfig + v2: APIConfig + + +@dataclass +class ConnectionConfig: + retries: int = 3 + delay_method: DelayMethod = "human" + delay_time: int = 1 # seconds + + def __post_init__(self) -> None: + if self.delay_method not in ("human", "robot"): + raise ValueError(f"delay_method must be 'human' or 'robot', got {self.delay_method}") + + +@dataclass +class CacheConfig: + dir: str = "~/.openml/cache" + ttl: int = 60 * 60 * 24 * 7 # one week + + +@dataclass +class Settings: + api: APISettings + connection: ConnectionConfig + cache: CacheConfig + + +settings = Settings( + api=APISettings( + v1=APIConfig( + server="https://www.openml.org/", + base_url="api/v1/xml/", + key="...", + ), + v2=APIConfig( + server="http://127.0.0.1:8001/", + base_url="", + key="...", + ), + ), + connection=ConnectionConfig(), + cache=CacheConfig(), +) diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py index dea5de809..74e08c709 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/http/client.py @@ -1,24 +1,30 @@ from __future__ import annotations -from typing import Any, Mapping +from typing import TYPE_CHECKING, Any, Mapping import requests from requests import Response from openml.__version__ import __version__ +if TYPE_CHECKING: + from openml._api.config import APIConfig + class HTTPClient: - def __init__(self, base_url: str) -> None: - self.base_url = base_url + def __init__(self, config: APIConfig) -> None: + self.config = config self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} + def _create_url(self, path: str) -> str: + return self.config.server + self.config.base_url + path + def get( self, path: str, params: Mapping[str, Any] | None = None, ) -> Response: - url = f"{self.base_url}/{path}" + url = self._create_url(path) return requests.get(url, params=params, headers=self.headers, timeout=10) def post( @@ -27,7 +33,7 @@ def post( data: Mapping[str, Any] | None = None, files: Any = None, ) -> Response: - url = f"{self.base_url}/{path}" + url = self._create_url(path) return requests.post(url, data=data, files=files, headers=self.headers, timeout=10) def delete( @@ -35,5 +41,5 @@ def delete( path: str, params: Mapping[str, Any] | None = None, ) -> Response: - url = f"{self.base_url}/{path}" + url = self._create_url(path) return requests.delete(url, params=params, headers=self.headers, timeout=10) diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index aa09a69db..98b587411 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -2,10 +2,7 @@ from typing import TYPE_CHECKING -from openml._api.config import ( - API_V1_SERVER, - API_V2_SERVER, -) +from openml._api.config import settings from openml._api.http.client import HTTPClient from openml._api.resources import ( DatasetsV1, @@ -25,8 +22,8 @@ def __init__(self, *, datasets: DatasetsAPI, tasks: TasksAPI): def build_backend(version: str, *, strict: bool) -> APIBackend: - v1_http = HTTPClient(API_V1_SERVER) - v2_http = HTTPClient(API_V2_SERVER) + v1_http = HTTPClient(config=settings.api.v1) + v2_http = HTTPClient(config=settings.api.v2) v1 = APIBackend( datasets=DatasetsV1(v1_http), From 2acbe9992cf95bfc103ff4fa0c360a58c1842870 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 7 Jan 2026 22:24:03 +0500 Subject: [PATCH 04/54] implement cache_dir --- 
openml/_api/http/client.py | 74 +++++++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 8 deletions(-) diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py index 74e08c709..49b05c88e 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/http/client.py @@ -1,36 +1,93 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping +from pathlib import Path +from typing import TYPE_CHECKING, Any +from urllib.parse import urlencode, urljoin, urlparse import requests from requests import Response from openml.__version__ import __version__ +from openml._api.config import settings if TYPE_CHECKING: from openml._api.config import APIConfig -class HTTPClient: +class CacheMixin: + @property + def dir(self) -> str: + return settings.cache.dir + + @property + def ttl(self) -> int: + return settings.cache.ttl + + def _get_cache_directory(self, url: str, params: dict[str, Any]) -> Path: + parsed_url = urlparse(url) + netloc_parts = parsed_url.netloc.split(".")[::-1] # reverse domain + path_parts = parsed_url.path.strip("/").split("/") + + # remove api_key and serialize params if any + filtered_params = {k: v for k, v in params.items() if k != "api_key"} + params_part = [urlencode(filtered_params)] if filtered_params else [] + + return Path(self.dir).joinpath(*netloc_parts, *path_parts, *params_part) + + def _get_cache_response(self, url: str, params: dict[str, Any]) -> Response | None: # noqa: ARG002 + return None + + def _set_cache_response(self, url: str, params: dict[str, Any], response: Response) -> None: # noqa: ARG002 + return None + + +class HTTPClient(CacheMixin): def __init__(self, config: APIConfig) -> None: self.config = config self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} - def _create_url(self, path: str) -> str: - return self.config.server + self.config.base_url + path + @property + def server(self) -> str: + return self.config.server + + @property + def base_url(self) -> str: + return self.config.base_url + + def _create_url(self, path: str) -> Any: + return urljoin(self.server, urljoin(self.base_url, path)) def get( self, path: str, - params: Mapping[str, Any] | None = None, + *, + params: dict[str, Any] | None = None, + use_cache: bool = False, + use_api_key: bool = False, ) -> Response: url = self._create_url(path) - return requests.get(url, params=params, headers=self.headers, timeout=10) + params = dict(params) if params is not None else {} + + if use_api_key: + params["api_key"] = self.config.key + + if use_cache: + response = self._get_cache_response(url, params) + if response: + return response + + response = requests.get(url, params=params, headers=self.headers, timeout=10) + + if use_cache: + self._set_cache_response(url, params, response) + + return response def post( self, path: str, - data: Mapping[str, Any] | None = None, + *, + data: dict[str, Any] | None = None, files: Any = None, ) -> Response: url = self._create_url(path) @@ -39,7 +96,8 @@ def post( def delete( self, path: str, - params: Mapping[str, Any] | None = None, + *, + params: dict[str, Any] | None = None, ) -> Response: url = self._create_url(path) return requests.delete(url, params=params, headers=self.headers, timeout=10) From af99880a9e16a49833c63084c9e9267c112b6b91 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 7 Jan 2026 23:42:17 +0500 Subject: [PATCH 05/54] refactor --- openml/_api/config.py | 1 + openml/_api/http/client.py | 100 +++++++++++++++++++++++++++---------- 2 files changed, 75 insertions(+), 26 
deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 1431f66b1..848fe8da1 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -11,6 +11,7 @@ class APIConfig: server: str base_url: str key: str + timeout: int = 10 # seconds @dataclass diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py index 49b05c88e..a90e93933 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/http/client.py @@ -23,7 +23,7 @@ def dir(self) -> str: def ttl(self) -> int: return settings.cache.ttl - def _get_cache_directory(self, url: str, params: dict[str, Any]) -> Path: + def _get_cache_dir(self, url: str, params: dict[str, Any]) -> Path: parsed_url = urlparse(url) netloc_parts = parsed_url.netloc.split(".")[::-1] # reverse domain path_parts = parsed_url.path.strip("/").split("/") @@ -34,10 +34,10 @@ def _get_cache_directory(self, url: str, params: dict[str, Any]) -> Path: return Path(self.dir).joinpath(*netloc_parts, *path_parts, *params_part) - def _get_cache_response(self, url: str, params: dict[str, Any]) -> Response | None: # noqa: ARG002 - return None + def _get_cache_response(self, cache_dir: Path) -> Response: # noqa: ARG002 + return Response() - def _set_cache_response(self, url: str, params: dict[str, Any], response: Response) -> None: # noqa: ARG002 + def _set_cache_response(self, cache_dir: Path, response: Response) -> None: # noqa: ARG002 return None @@ -54,50 +54,98 @@ def server(self) -> str: def base_url(self) -> str: return self.config.base_url - def _create_url(self, path: str) -> Any: - return urljoin(self.server, urljoin(self.base_url, path)) + @property + def key(self) -> str: + return self.config.key - def get( + @property + def timeout(self) -> int: + return self.config.timeout + + def request( self, + method: str, path: str, *, - params: dict[str, Any] | None = None, use_cache: bool = False, use_api_key: bool = False, + **request_kwargs: Any, ) -> Response: - url = self._create_url(path) - params = dict(params) if params is not None else {} + url = urljoin(self.server, urljoin(self.base_url, path)) + params = request_kwargs.pop("params", {}) + params = params.copy() if use_api_key: - params["api_key"] = self.config.key + params["api_key"] = self.key - if use_cache: - response = self._get_cache_response(url, params) - if response: - return response + headers = request_kwargs.pop("headers", {}) + headers = headers.copy() + headers.update(self.headers) + + timeout = request_kwargs.pop("timeout", self.timeout) + cache_dir = self._get_cache_dir(url, params) - response = requests.get(url, params=params, headers=self.headers, timeout=10) + if use_cache: + try: + return self._get_cache_response(cache_dir) + # TODO: handle ttl expired error + except Exception: + raise + + response = requests.request( + method=method, + url=url, + params=params, + headers=headers, + timeout=timeout, + **request_kwargs, + ) if use_cache: - self._set_cache_response(url, params, response) + self._set_cache_response(cache_dir, response) return response - def post( + def get( self, path: str, *, - data: dict[str, Any] | None = None, - files: Any = None, + use_cache: bool = False, + use_api_key: bool = False, + **request_kwargs: Any, ) -> Response: - url = self._create_url(path) - return requests.post(url, data=data, files=files, headers=self.headers, timeout=10) + # TODO: remove override when cache is implemented + use_cache = False + return self.request( + method="GET", + path=path, + use_cache=use_cache, + use_api_key=use_api_key, + **request_kwargs, + ) + + def 
post( + self, + path: str, + **request_kwargs: Any, + ) -> Response: + return self.request( + method="POST", + path=path, + use_cache=False, + use_api_key=True, + **request_kwargs, + ) def delete( self, path: str, - *, - params: dict[str, Any] | None = None, + **request_kwargs: Any, ) -> Response: - url = self._create_url(path) - return requests.delete(url, params=params, headers=self.headers, timeout=10) + return self.request( + method="DELETE", + path=path, + use_cache=False, + use_api_key=True, + **request_kwargs, + ) From 4c75e16890a76d8fbc0ddc125a267d23ddaded44 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Thu, 15 Jan 2026 14:51:22 +0500 Subject: [PATCH 06/54] undo changes in tasks/functions.py --- openml/tasks/functions.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/openml/tasks/functions.py b/openml/tasks/functions.py index a794ad56d..e9b879ae4 100644 --- a/openml/tasks/functions.py +++ b/openml/tasks/functions.py @@ -12,7 +12,6 @@ import openml._api_calls import openml.utils -from openml._api import api_context from openml.datasets import get_dataset from openml.exceptions import OpenMLCacheException @@ -445,16 +444,11 @@ def _get_task_description(task_id: int) -> OpenMLTask: except OpenMLCacheException: _cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id) xml_file = _cache_dir / "task.xml" - result = api_context.backend.tasks.get(task_id, return_response=True) + task_xml = openml._api_calls._perform_api_call("task/%d" % task_id, "get") - if isinstance(result, tuple): - task, response = result - with xml_file.open("w", encoding="utf8") as fh: - fh.write(response.text) - else: - task = result - - return task + with xml_file.open("w", encoding="utf8") as fh: + fh.write(task_xml) + return _create_task_from_xml(task_xml) def _create_task_from_xml(xml: str) -> OpenMLTask: From c6033832e8008d0d8f94fa196d519e35f24030c3 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 21 Jan 2026 10:47:26 +0500 Subject: [PATCH 07/54] add tests directory --- tests/test_api/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/test_api/__init__.py diff --git a/tests/test_api/__init__.py b/tests/test_api/__init__.py new file mode 100644 index 000000000..e69de29bb From ff6a8b05314e74bba7ad64388304a3708f83dbf0 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 21 Jan 2026 11:40:23 +0500 Subject: [PATCH 08/54] use enum for delay method --- openml/_api/config.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 848fe8da1..13063df7a 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -1,9 +1,12 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Literal +from enum import Enum -DelayMethod = Literal["human", "robot"] + +class DelayMethod(str, Enum): + HUMAN = "human" + ROBOT = "robot" @dataclass @@ -23,13 +26,9 @@ class APISettings: @dataclass class ConnectionConfig: retries: int = 3 - delay_method: DelayMethod = "human" + delay_method: DelayMethod = DelayMethod.HUMAN delay_time: int = 1 # seconds - def __post_init__(self) -> None: - if self.delay_method not in ("human", "robot"): - raise ValueError(f"delay_method must be 'human' or 'robot', got {self.delay_method}") - @dataclass class CacheConfig: From f01898fe88b397b0c981398650664e3ecb3f9b08 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 21 Jan 2026 11:41:33 +0500 Subject: [PATCH 09/54] implement cache --- 
openml/_api/http/client.py | 76 ++++++++++++++++++++++++++++++++++---- 1 file changed, 69 insertions(+), 7 deletions(-) diff --git a/openml/_api/http/client.py b/openml/_api/http/client.py index a90e93933..f76efe5a1 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/http/client.py @@ -1,5 +1,7 @@ from __future__ import annotations +import json +import time from pathlib import Path from typing import TYPE_CHECKING, Any from urllib.parse import urlencode, urljoin, urlparse @@ -34,11 +36,70 @@ def _get_cache_dir(self, url: str, params: dict[str, Any]) -> Path: return Path(self.dir).joinpath(*netloc_parts, *path_parts, *params_part) - def _get_cache_response(self, cache_dir: Path) -> Response: # noqa: ARG002 - return Response() + def _get_cache_response(self, cache_dir: Path) -> Response: + if not cache_dir.exists(): + raise FileNotFoundError(f"Cache directory not found: {cache_dir}") - def _set_cache_response(self, cache_dir: Path, response: Response) -> None: # noqa: ARG002 - return None + meta_path = cache_dir / "meta.json" + headers_path = cache_dir / "headers.json" + body_path = cache_dir / "body.bin" + + if not (meta_path.exists() and headers_path.exists() and body_path.exists()): + raise FileNotFoundError(f"Incomplete cache at {cache_dir}") + + with meta_path.open("r", encoding="utf-8") as f: + meta = json.load(f) + + created_at = meta.get("created_at") + if created_at is None: + raise ValueError("Cache metadata missing 'created_at'") + + if time.time() - created_at > self.ttl: + raise TimeoutError(f"Cache expired for {cache_dir}") + + with headers_path.open("r", encoding="utf-8") as f: + headers = json.load(f) + + body = body_path.read_bytes() + + response = Response() + response.status_code = meta["status_code"] + response.url = meta["url"] + response.reason = meta["reason"] + response.headers = headers + response._content = body + response.encoding = meta["encoding"] + + return response + + def _set_cache_response(self, cache_dir: Path, response: Response) -> None: + cache_dir.mkdir(parents=True, exist_ok=True) + + # body + (cache_dir / "body.bin").write_bytes(response.content) + + # headers + with (cache_dir / "headers.json").open("w", encoding="utf-8") as f: + json.dump(dict(response.headers), f) + + # meta + meta = { + "status_code": response.status_code, + "url": response.url, + "reason": response.reason, + "encoding": response.encoding, + "elapsed": response.elapsed.total_seconds(), + "created_at": time.time(), + "request": { + "method": response.request.method if response.request else None, + "url": response.request.url if response.request else None, + "headers": dict(response.request.headers) if response.request else None, + "body": response.request.body if response.request else None, + }, + } + + with (cache_dir / "meta.json").open("w", encoding="utf-8") as f: + json.dump(meta, f) class HTTPClient(CacheMixin): @@ -88,7 +149,10 @@ def request( if use_cache: try: return self._get_cache_response(cache_dir) - # TODO: handle ttl expired error + except FileNotFoundError: + pass + except TimeoutError: + pass except Exception: raise @@ -114,8 +178,6 @@ def get( use_api_key: bool = False, **request_kwargs: Any, ) -> Response: - # TODO: remove override when cache is implemented - use_cache = False return self.request( method="GET", path=path, From 5c4511e60b0bc50aba2509bc48bb931082b0caf5 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 21 Jan 2026 13:36:05 +0500 Subject: [PATCH 10/54] refactor clients --- openml/_api/clients/__init__.py | 6 + .../_api/{http/client.py => 
clients/http.py} | 126 +++++++++--------- .../_api/{http/utils.py => clients/minio.py} | 0 openml/_api/config.py | 6 +- openml/_api/http/__init__.py | 3 - openml/_api/runtime/core.py | 37 ++++- 6 files changed, 101 insertions(+), 77 deletions(-) create mode 100644 openml/_api/clients/__init__.py rename openml/_api/{http/client.py => clients/http.py} (61%) rename openml/_api/{http/utils.py => clients/minio.py} (100%) delete mode 100644 openml/_api/http/__init__.py diff --git a/openml/_api/clients/__init__.py b/openml/_api/clients/__init__.py new file mode 100644 index 000000000..8a5ff94e4 --- /dev/null +++ b/openml/_api/clients/__init__.py @@ -0,0 +1,6 @@ +from .http import HTTPCache, HTTPClient + +__all__ = [ + "HTTPCache", + "HTTPClient", +] diff --git a/openml/_api/http/client.py b/openml/_api/clients/http.py similarity index 61% rename from openml/_api/http/client.py rename to openml/_api/clients/http.py index f76efe5a1..4e126ee92 100644 --- a/openml/_api/http/client.py +++ b/openml/_api/clients/http.py @@ -10,42 +10,41 @@ from requests import Response from openml.__version__ import __version__ -from openml._api.config import settings if TYPE_CHECKING: - from openml._api.config import APIConfig + from openml._api.config import DelayMethod -class CacheMixin: - @property - def dir(self) -> str: - return settings.cache.dir +class HTTPCache: + def __init__(self, *, path: Path, ttl: int) -> None: + self.path = path + self.ttl = ttl - @property - def ttl(self) -> int: - return settings.cache.ttl - - def _get_cache_dir(self, url: str, params: dict[str, Any]) -> Path: + def get_key(self, url: str, params: dict[str, Any]) -> str: parsed_url = urlparse(url) - netloc_parts = parsed_url.netloc.split(".")[::-1] # reverse domain + netloc_parts = parsed_url.netloc.split(".")[::-1] path_parts = parsed_url.path.strip("/").split("/") - # remove api_key and serialize params if any filtered_params = {k: v for k, v in params.items() if k != "api_key"} params_part = [urlencode(filtered_params)] if filtered_params else [] - return Path(self.dir).joinpath(*netloc_parts, *path_parts, *params_part) + return str(Path(*netloc_parts, *path_parts, *params_part)) + + def _key_to_path(self, key: str) -> Path: + return self.path.joinpath(key) + + def load(self, key: str) -> Response: + path = self._key_to_path(key) - def _get_cache_response(self, cache_dir: Path) -> Response: - if not cache_dir.exists(): - raise FileNotFoundError(f"Cache directory not found: {cache_dir}") + if not path.exists(): + raise FileNotFoundError(f"Cache directory not found: {path}") - meta_path = cache_dir / "meta.json" - headers_path = cache_dir / "headers.json" - body_path = cache_dir / "body.bin" + meta_path = path / "meta.json" + headers_path = path / "headers.json" + body_path = path / "body.bin" if not (meta_path.exists() and headers_path.exists() and body_path.exists()): - raise FileNotFoundError(f"Incomplete cache at {cache_dir}") + raise FileNotFoundError(f"Incomplete cache at {path}") with meta_path.open("r", encoding="utf-8") as f: meta = json.load(f) @@ -55,7 +54,7 @@ def _get_cache_response(self, cache_dir: Path) -> Response: raise ValueError("Cache metadata missing 'created_at'") if time.time() - created_at > self.ttl: - raise TimeoutError(f"Cache expired for {cache_dir}") + raise TimeoutError(f"Cache expired for {path}") with headers_path.open("r", encoding="utf-8") as f: headers = json.load(f) @@ -72,17 +71,15 @@ def _get_cache_response(self, cache_dir: Path) -> Response: return response - def _set_cache_response(self, 
cache_dir: Path, response: Response) -> None: - cache_dir.mkdir(parents=True, exist_ok=True) + def save(self, key: str, response: Response) -> None: + path = self._key_to_path(key) + path.mkdir(parents=True, exist_ok=True) - # body - (cache_dir / "body.bin").write_bytes(response.content) + (path / "body.bin").write_bytes(response.content) - # headers - with (cache_dir / "headers.json").open("w", encoding="utf-8") as f: + with (path / "headers.json").open("w", encoding="utf-8") as f: json.dump(dict(response.headers), f) - # meta meta = { "status_code": response.status_code, "url": response.url, @@ -98,30 +95,33 @@ def _set_cache_response(self, cache_dir: Path, response: Response) -> None: }, } - with (cache_dir / "meta.json").open("w", encoding="utf-8") as f: + with (path / "meta.json").open("w", encoding="utf-8") as f: json.dump(meta, f) -class HTTPClient(CacheMixin): - def __init__(self, config: APIConfig) -> None: - self.config = config - self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} - - @property - def server(self) -> str: - return self.config.server - - @property - def base_url(self) -> str: - return self.config.base_url - - @property - def key(self) -> str: - return self.config.key +class HTTPClient: + def __init__( # noqa: PLR0913 + self, + *, + server: str, + base_url: str, + api_key: str, + timeout: int, + retries: int, + delay_method: DelayMethod, + delay_time: int, + cache: HTTPCache | None = None, + ) -> None: + self.server = server + self.base_url = base_url + self.api_key = api_key + self.timeout = timeout + self.retries = retries + self.delay_method = delay_method + self.delay_time = delay_time + self.cache = cache - @property - def timeout(self) -> int: - return self.config.timeout + self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} def request( self, @@ -134,27 +134,25 @@ def request( ) -> Response: url = urljoin(self.server, urljoin(self.base_url, path)) - params = request_kwargs.pop("params", {}) - params = params.copy() + # prepare params + params = request_kwargs.pop("params", {}).copy() if use_api_key: - params["api_key"] = self.key + params["api_key"] = self.api_key - headers = request_kwargs.pop("headers", {}) - headers = headers.copy() + # prepare headers + headers = request_kwargs.pop("headers", {}).copy() headers.update(self.headers) timeout = request_kwargs.pop("timeout", self.timeout) - cache_dir = self._get_cache_dir(url, params) - if use_cache: + if use_cache and self.cache is not None: + cache_key = self.cache.get_key(url, params) try: - return self._get_cache_response(cache_dir) - except FileNotFoundError: - pass - except TimeoutError: - pass + return self.cache.load(cache_key) + except (FileNotFoundError, TimeoutError): + pass # cache miss or expired, continue except Exception: - raise + raise # propagate unexpected cache errors response = requests.request( method=method, @@ -165,8 +163,8 @@ def request( **request_kwargs, ) - if use_cache: - self._set_cache_response(cache_dir, response) + if use_cache and self.cache is not None: + self.cache.save(cache_key, response) return response diff --git a/openml/_api/http/utils.py b/openml/_api/clients/minio.py similarity index 100% rename from openml/_api/http/utils.py rename to openml/_api/clients/minio.py diff --git a/openml/_api/config.py b/openml/_api/config.py index 13063df7a..aa153a556 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -13,7 +13,7 @@ class DelayMethod(str, Enum): class APIConfig: server: str base_url: str - key: str + 
api_key: str timeout: int = 10 # seconds @@ -48,12 +48,12 @@ class Settings: v1=APIConfig( server="https://www.openml.org/", base_url="api/v1/xml/", - key="...", + api_key="...", ), v2=APIConfig( server="http://127.0.0.1:8001/", base_url="", - key="...", + api_key="...", ), ), connection=ConnectionConfig(), diff --git a/openml/_api/http/__init__.py b/openml/_api/http/__init__.py deleted file mode 100644 index 8e6d1e4ce..000000000 --- a/openml/_api/http/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from openml._api.http.client import HTTPClient - -__all__ = ["HTTPClient"] diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 98b587411..483b74d3d 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -1,9 +1,10 @@ from __future__ import annotations +from pathlib import Path from typing import TYPE_CHECKING +from openml._api.clients import HTTPCache, HTTPClient from openml._api.config import settings -from openml._api.http.client import HTTPClient from openml._api.resources import ( DatasetsV1, DatasetsV2, @@ -22,20 +23,42 @@ def __init__(self, *, datasets: DatasetsAPI, tasks: TasksAPI): def build_backend(version: str, *, strict: bool) -> APIBackend: - v1_http = HTTPClient(config=settings.api.v1) - v2_http = HTTPClient(config=settings.api.v2) + http_cache = HTTPCache( + path=Path(settings.cache.dir), + ttl=settings.cache.ttl, + ) + v1_http_client = HTTPClient( + server=settings.api.v1.server, + base_url=settings.api.v1.base_url, + api_key=settings.api.v1.api_key, + timeout=settings.api.v1.timeout, + retries=settings.connection.retries, + delay_method=settings.connection.delay_method, + delay_time=settings.connection.delay_time, + cache=http_cache, + ) + v2_http_client = HTTPClient( + server=settings.api.v2.server, + base_url=settings.api.v2.base_url, + api_key=settings.api.v2.api_key, + timeout=settings.api.v2.timeout, + retries=settings.connection.retries, + delay_method=settings.connection.delay_method, + delay_time=settings.connection.delay_time, + cache=http_cache, + ) v1 = APIBackend( - datasets=DatasetsV1(v1_http), - tasks=TasksV1(v1_http), + datasets=DatasetsV1(v1_http_client), + tasks=TasksV1(v1_http_client), ) if version == "v1": return v1 v2 = APIBackend( - datasets=DatasetsV2(v2_http), - tasks=TasksV2(v2_http), + datasets=DatasetsV2(v2_http_client), + tasks=TasksV2(v2_http_client), ) if strict: From 43276d2ac56ba39d195b5d54d72bed2e61da3f79 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Fri, 23 Jan 2026 12:17:53 +0500 Subject: [PATCH 11/54] fix import in resources/base.py --- openml/_api/resources/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openml/_api/resources/base.py b/openml/_api/resources/base.py index 6fbf8977d..54b40a0e0 100644 --- a/openml/_api/resources/base.py +++ b/openml/_api/resources/base.py @@ -6,7 +6,7 @@ if TYPE_CHECKING: from requests import Response - from openml._api.http import HTTPClient + from openml._api.clients import HTTPClient from openml.datasets.dataset import OpenMLDataset from openml.tasks.task import OpenMLTask From 1206f697d09df82ed7f18bfea94a476844e01cb4 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 26 Jan 2026 13:52:20 +0500 Subject: [PATCH 12/54] refactor and add exception handling --- openml/_api/clients/http.py | 241 +++++++++++++++++++++++++++++++++--- openml/_api/config.py | 5 +- openml/_api/runtime/core.py | 6 +- 3 files changed, 229 insertions(+), 23 deletions(-) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index 4e126ee92..dc184074d 100644 
--- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -1,18 +1,28 @@ from __future__ import annotations import json +import logging +import math +import random import time +import xml +from collections.abc import Mapping from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import Any from urllib.parse import urlencode, urljoin, urlparse import requests +import xmltodict from requests import Response from openml.__version__ import __version__ - -if TYPE_CHECKING: - from openml._api.config import DelayMethod +from openml._api.config import RetryPolicy +from openml.exceptions import ( + OpenMLNotAuthorizedError, + OpenMLServerError, + OpenMLServerException, + OpenMLServerNoResult, +) class HTTPCache: @@ -108,8 +118,7 @@ def __init__( # noqa: PLR0913 api_key: str, timeout: int, retries: int, - delay_method: DelayMethod, - delay_time: int, + retry_policy: RetryPolicy, cache: HTTPCache | None = None, ) -> None: self.server = server @@ -117,12 +126,194 @@ def __init__( # noqa: PLR0913 self.api_key = api_key self.timeout = timeout self.retries = retries - self.delay_method = delay_method - self.delay_time = delay_time + self.retry_policy = retry_policy self.cache = cache + self.retry_func = ( + self._human_delay if retry_policy == RetryPolicy.HUMAN else self._robot_delay + ) self.headers: dict[str, str] = {"user-agent": f"openml-python/{__version__}"} + def _robot_delay(self, n: int) -> float: + wait = (1 / (1 + math.exp(-(n * 0.5 - 4)))) * 60 + variation = random.gauss(0, wait / 10) + return max(1.0, wait + variation) + + def _human_delay(self, n: int) -> float: + return max(1.0, n) + + def _parse_exception_response( + self, + response: Response, + ) -> tuple[int | None, str]: + content_type = response.headers.get("Content-Type", "").lower() + + if "json" in content_type: + server_exception = response.json() + server_error = server_exception["detail"] + code = server_error.get("code") + message = server_error.get("message") + additional_information = server_error.get("additional_information") + else: + server_exception = xmltodict.parse(response.text) + server_error = server_exception["oml:error"] + code = server_error.get("oml:code") + message = server_error.get("oml:message") + additional_information = server_error.get("oml:additional_information") + + if code is not None: + code = int(code) + + if message and additional_information: + full_message = f"{message} - {additional_information}" + elif message: + full_message = message + elif additional_information: + full_message = additional_information + else: + full_message = "" + + return code, full_message + + def _raise_code_specific_error( + self, + code: int, + message: str, + url: str, + files: Mapping[str, Any] | None, + ) -> None: + if code in [111, 372, 512, 500, 482, 542, 674]: + # 512 for runs, 372 for datasets, 500 for flows + # 482 for tasks, 542 for evaluations, 674 for setups + # 111 for dataset descriptions + raise OpenMLServerNoResult(code=code, message=message, url=url) + + # 163: failure to validate flow XML (https://www.openml.org/api_docs#!/flow/post_flow) + if code in [163] and files is not None and "description" in files: + # file_elements['description'] is the XML file description of the flow + message = f"\n{files['description']}\n{message}" + + if code in [ + 102, # flow/exists post + 137, # dataset post + 350, # dataset/42 delete + 310, # flow/ post + 320, # flow/42 delete + 400, # run/42 delete + 460, # task/42 delete + ]: + raise OpenMLNotAuthorizedError( + message=( + f"The 
API call {url} requires authentication via an API key.\nPlease configure " + "OpenML-Python to use your API as described in this example:" + "\nhttps://openml.github.io/openml-python/latest/examples/Basics/introduction_tutorial/#authentication" + ) + ) + + # Propagate all server errors to the calling functions, except + # for 107 which represents a database connection error. + # These are typically caused by high server load, + # which means trying again might resolve the issue. + # DATABASE_CONNECTION_ERRCODE + if code != 107: + raise OpenMLServerException(code=code, message=message, url=url) + + def _validate_response( + self, + method: str, + url: str, + files: Mapping[str, Any] | None, + response: Response, + ) -> Exception | None: + if ( + "Content-Encoding" not in response.headers + or response.headers["Content-Encoding"] != "gzip" + ): + logging.warning(f"Received uncompressed content from OpenML for {url}.") + + if response.status_code == 200: + return None + + if response.status_code == requests.codes.URI_TOO_LONG: + raise OpenMLServerError(f"URI too long! ({url})") + + retry_raise_e: Exception | None = None + + try: + code, message = self._parse_exception_response(response) + + except (requests.exceptions.JSONDecodeError, xml.parsers.expat.ExpatError) as e: + if method != "GET": + extra = f"Status code: {response.status_code}\n{response.text}" + raise OpenMLServerError( + f"Unexpected server error when calling {url}. Please contact the " + f"developers!\n{extra}" + ) from e + + retry_raise_e = e + + except Exception as e: + # If we failed to parse it out, + # then something has gone wrong in the body we have sent back + # from the server and there is little extra information we can capture. + raise OpenMLServerError( + f"Unexpected server error when calling {url}. 
Please contact the developers!\n" + f"Status code: {response.status_code}\n{response.text}", + ) from e + + if code is not None: + self._raise_code_specific_error( + code=code, + message=message, + url=url, + files=files, + ) + + if retry_raise_e is None: + retry_raise_e = OpenMLServerException(code=code, message=message, url=url) + + return retry_raise_e + + def _request( # noqa: PLR0913 + self, + method: str, + url: str, + params: Mapping[str, Any], + headers: Mapping[str, str], + timeout: float | int, + files: Mapping[str, Any] | None, + **request_kwargs: Any, + ) -> tuple[Response | None, Exception | None]: + retry_raise_e: Exception | None = None + response: Response | None = None + + try: + response = requests.request( + method=method, + url=url, + params=params, + headers=headers, + timeout=timeout, + files=files, + **request_kwargs, + ) + except ( + requests.exceptions.ChunkedEncodingError, + requests.exceptions.ConnectionError, + requests.exceptions.SSLError, + ) as e: + retry_raise_e = e + + if response is not None: + retry_raise_e = self._validate_response( + method=method, + url=url, + files=files, + response=response, + ) + + return response, retry_raise_e + def request( self, method: str, @@ -133,6 +324,7 @@ def request( **request_kwargs: Any, ) -> Response: url = urljoin(self.server, urljoin(self.base_url, path)) + retries = max(1, self.retries) # prepare params params = request_kwargs.pop("params", {}).copy() @@ -144,6 +336,9 @@ def request( headers.update(self.headers) timeout = request_kwargs.pop("timeout", self.timeout) + files = request_kwargs.pop("files", None) + + use_cache = False if use_cache and self.cache is not None: cache_key = self.cache.get_key(url, params) @@ -154,14 +349,28 @@ def request( except Exception: raise # propagate unexpected cache errors - response = requests.request( - method=method, - url=url, - params=params, - headers=headers, - timeout=timeout, - **request_kwargs, - ) + for retry_counter in range(1, retries + 1): + response, retry_raise_e = self._request( + method=method, + url=url, + params=params, + headers=headers, + timeout=timeout, + files=files, + **request_kwargs, + ) + + # executed successfully + if retry_raise_e is None: + break + # tries completed + if retry_counter >= retries: + raise retry_raise_e + + delay = self.retry_func(retry_counter) + time.sleep(delay) + + assert response is not None if use_cache and self.cache is not None: self.cache.save(cache_key, response) diff --git a/openml/_api/config.py b/openml/_api/config.py index aa153a556..6cce06403 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -4,7 +4,7 @@ from enum import Enum -class DelayMethod(str, Enum): +class RetryPolicy(str, Enum): HUMAN = "human" ROBOT = "robot" @@ -26,8 +26,7 @@ class APISettings: @dataclass class ConnectionConfig: retries: int = 3 - delay_method: DelayMethod = DelayMethod.HUMAN - delay_time: int = 1 # seconds + retry_policy: RetryPolicy = RetryPolicy.HUMAN @dataclass diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 483b74d3d..25f2649ee 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -33,8 +33,7 @@ def build_backend(version: str, *, strict: bool) -> APIBackend: api_key=settings.api.v1.api_key, timeout=settings.api.v1.timeout, retries=settings.connection.retries, - delay_method=settings.connection.delay_method, - delay_time=settings.connection.delay_time, + retry_policy=settings.connection.retry_policy, cache=http_cache, ) v2_http_client = HTTPClient( @@ -43,8 +42,7 @@ def 
build_backend(version: str, *, strict: bool) -> APIBackend: api_key=settings.api.v2.api_key, timeout=settings.api.v2.timeout, retries=settings.connection.retries, - delay_method=settings.connection.delay_method, - delay_time=settings.connection.delay_time, + retry_policy=settings.connection.retry_policy, cache=http_cache, ) From 4948e991f96821372934c7132f4a695da165d17b Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 26 Jan 2026 20:43:32 +0500 Subject: [PATCH 13/54] refactor resources/base/ --- openml/_api/resources/base/__init__.py | 13 ++++++ openml/_api/resources/base/base.py | 41 +++++++++++++++++++ .../resources/{base.py => base/resources.py} | 16 ++++---- openml/_api/resources/base/versions.py | 23 +++++++++++ openml/_api/resources/datasets.py | 6 +-- openml/_api/resources/tasks.py | 6 +-- 6 files changed, 91 insertions(+), 14 deletions(-) create mode 100644 openml/_api/resources/base/__init__.py create mode 100644 openml/_api/resources/base/base.py rename openml/_api/resources/{base.py => base/resources.py} (64%) create mode 100644 openml/_api/resources/base/versions.py diff --git a/openml/_api/resources/base/__init__.py b/openml/_api/resources/base/__init__.py new file mode 100644 index 000000000..851cfe942 --- /dev/null +++ b/openml/_api/resources/base/__init__.py @@ -0,0 +1,13 @@ +from openml._api.resources.base.base import APIVersion, ResourceAPI, ResourceType +from openml._api.resources.base.resources import DatasetsAPI, TasksAPI +from openml._api.resources.base.versions import ResourceV1, ResourceV2 + +__all__ = [ + "APIVersion", + "DatasetsAPI", + "ResourceAPI", + "ResourceType", + "ResourceV1", + "ResourceV2", + "TasksAPI", +] diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py new file mode 100644 index 000000000..8d85d054b --- /dev/null +++ b/openml/_api/resources/base/base.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from enum import Enum +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from openml._api.clients import HTTPClient + + +class APIVersion(str, Enum): + V1 = "v1" + V2 = "v2" + + +class ResourceType(str, Enum): + DATASETS = "datasets" + TASKS = "tasks" + + +class ResourceAPI(ABC): + api_version: APIVersion | None = None + resource_type: ResourceType | None = None + + def __init__(self, http: HTTPClient): + self._http = http + + def _raise_not_implemented_error(self, method_name: str | None = None) -> None: + version = getattr(self.api_version, "name", "Unknown version") + resource = getattr(self.resource_type, "name", "Unknown resource") + method_info = f" Method: {method_name}" if method_name else "" + raise NotImplementedError( + f"{self.__class__.__name__}: {version} API does not support this " + f"functionality for resource: {resource}.{method_info}" + ) + + @abstractmethod + def delete(self) -> None: ... + + @abstractmethod + def publish(self) -> None: ... 
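As a rough illustration of how the base class added above is intended to be used — a hypothetical subclass, shown only for orientation; the real version- and resource-specific classes follow in the next files of this patch:

from openml._api.resources.base.base import APIVersion, ResourceAPI, ResourceType


class ExampleTasks(ResourceAPI):
    # Hypothetical subclass, for illustration only.
    api_version = APIVersion.V2
    resource_type = ResourceType.TASKS

    def delete(self) -> None:
        # No delete endpoint available for this version, so surface the descriptive error.
        self._raise_not_implemented_error("delete")

    def publish(self) -> None:
        self._raise_not_implemented_error("publish")
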
diff --git a/openml/_api/resources/base.py b/openml/_api/resources/base/resources.py similarity index 64% rename from openml/_api/resources/base.py rename to openml/_api/resources/base/resources.py index 54b40a0e0..edb26c91c 100644 --- a/openml/_api/resources/base.py +++ b/openml/_api/resources/base/resources.py @@ -1,27 +1,27 @@ from __future__ import annotations -from abc import ABC, abstractmethod +from abc import abstractmethod from typing import TYPE_CHECKING +from openml._api.resources.base import ResourceAPI, ResourceType + if TYPE_CHECKING: from requests import Response - from openml._api.clients import HTTPClient from openml.datasets.dataset import OpenMLDataset from openml.tasks.task import OpenMLTask -class ResourceAPI: - def __init__(self, http: HTTPClient): - self._http = http - +class DatasetsAPI(ResourceAPI): + resource_type: ResourceType | None = ResourceType.DATASETS -class DatasetsAPI(ResourceAPI, ABC): @abstractmethod def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: ... -class TasksAPI(ResourceAPI, ABC): +class TasksAPI(ResourceAPI): + resource_type: ResourceType | None = ResourceType.TASKS + @abstractmethod def get( self, diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py new file mode 100644 index 000000000..8a81517e5 --- /dev/null +++ b/openml/_api/resources/base/versions.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from openml._api.resources.base import APIVersion, ResourceAPI + + +class ResourceV1(ResourceAPI): + api_version: APIVersion | None = APIVersion.V1 + + def delete(self) -> None: + pass + + def publish(self) -> None: + pass + + +class ResourceV2(ResourceAPI): + api_version: APIVersion | None = APIVersion.V2 + + def delete(self) -> None: + self._raise_not_implemented_error("delete") + + def publish(self) -> None: + self._raise_not_implemented_error("publish") diff --git a/openml/_api/resources/datasets.py b/openml/_api/resources/datasets.py index 9ff1ec278..f3a49a84f 100644 --- a/openml/_api/resources/datasets.py +++ b/openml/_api/resources/datasets.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING -from openml._api.resources.base import DatasetsAPI +from openml._api.resources.base import DatasetsAPI, ResourceV1, ResourceV2 if TYPE_CHECKING: from responses import Response @@ -10,11 +10,11 @@ from openml.datasets.dataset import OpenMLDataset -class DatasetsV1(DatasetsAPI): +class DatasetsV1(ResourceV1, DatasetsAPI): def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: raise NotImplementedError -class DatasetsV2(DatasetsAPI): +class DatasetsV2(ResourceV2, DatasetsAPI): def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: raise NotImplementedError diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py index f494fb9a3..a7ca39208 100644 --- a/openml/_api/resources/tasks.py +++ b/openml/_api/resources/tasks.py @@ -4,7 +4,7 @@ import xmltodict -from openml._api.resources.base import TasksAPI +from openml._api.resources.base import ResourceV1, ResourceV2, TasksAPI from openml.tasks.task import ( OpenMLClassificationTask, OpenMLClusteringTask, @@ -18,7 +18,7 @@ from requests import Response -class TasksV1(TasksAPI): +class TasksV1(ResourceV1, TasksAPI): def get( self, task_id: int, @@ -118,7 +118,7 @@ def _create_task_from_xml(self, xml: str) -> OpenMLTask: return cls(**common_kwargs) # type: ignore -class TasksV2(TasksAPI): +class TasksV2(ResourceV2, TasksAPI): def get( self, task_id: int, From 
a3541675fd6452e68f268127df7c583bb9c2d0ca Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 26 Jan 2026 21:06:20 +0500 Subject: [PATCH 14/54] implement delete --- openml/_api/resources/base/base.py | 23 +++++--- openml/_api/resources/base/resources.py | 4 +- openml/_api/resources/base/versions.py | 76 ++++++++++++++++++++++--- 3 files changed, 86 insertions(+), 17 deletions(-) diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index 8d85d054b..9b1803508 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -14,28 +14,37 @@ class APIVersion(str, Enum): class ResourceType(str, Enum): - DATASETS = "datasets" - TASKS = "tasks" + DATASET = "dataset" + TASK = "task" + TASK_TYPE = "task_type" + EVALUATION_MEASURE = "evaluation_measure" + ESTIMATION_PROCEDURE = "estimation_procedure" + EVALUATION = "evaluation" + FLOW = "flow" + STUDY = "study" + RUN = "run" + SETUP = "setup" + USER = "user" class ResourceAPI(ABC): - api_version: APIVersion | None = None - resource_type: ResourceType | None = None + api_version: APIVersion + resource_type: ResourceType def __init__(self, http: HTTPClient): self._http = http - def _raise_not_implemented_error(self, method_name: str | None = None) -> None: + def _get_not_implemented_message(self, method_name: str | None = None) -> str: version = getattr(self.api_version, "name", "Unknown version") resource = getattr(self.resource_type, "name", "Unknown resource") method_info = f" Method: {method_name}" if method_name else "" - raise NotImplementedError( + return ( f"{self.__class__.__name__}: {version} API does not support this " f"functionality for resource: {resource}.{method_info}" ) @abstractmethod - def delete(self) -> None: ... + def delete(self, resource_id: int) -> bool: ... @abstractmethod def publish(self) -> None: ... diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index edb26c91c..55cb95c0d 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -13,14 +13,14 @@ class DatasetsAPI(ResourceAPI): - resource_type: ResourceType | None = ResourceType.DATASETS + resource_type: ResourceType = ResourceType.DATASET @abstractmethod def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: ... 
class TasksAPI(ResourceAPI): - resource_type: ResourceType | None = ResourceType.TASKS + resource_type: ResourceType = ResourceType.TASK @abstractmethod def get( diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 8a81517e5..ce7b02057 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -1,23 +1,83 @@ from __future__ import annotations -from openml._api.resources.base import APIVersion, ResourceAPI +import xmltodict + +from openml._api.resources.base import APIVersion, ResourceAPI, ResourceType +from openml.exceptions import ( + OpenMLNotAuthorizedError, + OpenMLServerError, + OpenMLServerException, +) class ResourceV1(ResourceAPI): - api_version: APIVersion | None = APIVersion.V1 + api_version: APIVersion = APIVersion.V1 - def delete(self) -> None: - pass + def delete(self, resource_id: int) -> bool: + if self.resource_type == ResourceType.DATASET: + resource_type = "data" + else: + resource_type = self.resource_type.name + + legal_resources = { + "data", + "flow", + "task", + "run", + "study", + "user", + } + if resource_type not in legal_resources: + raise ValueError(f"Can't delete a {resource_type}") + + url_suffix = f"{resource_type}/{resource_id}" + try: + response = self._http.delete(url_suffix) + result = xmltodict.parse(response.content) + return f"oml:{resource_type}_delete" in result + except OpenMLServerException as e: + # https://github.com/openml/OpenML/blob/21f6188d08ac24fcd2df06ab94cf421c946971b0/openml_OS/views/pages/api_new/v1/xml/pre.php + # Most exceptions are descriptive enough to be raised as their standard + # OpenMLServerException, however there are two cases where we add information: + # - a generic "failed" message, we direct them to the right issue board + # - when the user successfully authenticates with the server, + # but user is not allowed to take the requested action, + # in which case we specify a OpenMLNotAuthorizedError. + by_other_user = [323, 353, 393, 453, 594] + has_dependent_entities = [324, 326, 327, 328, 354, 454, 464, 595] + unknown_reason = [325, 355, 394, 455, 593] + if e.code in by_other_user: + raise OpenMLNotAuthorizedError( + message=( + f"The {resource_type} can not be deleted " + "because it was not uploaded by you." 
+ ), + ) from e + if e.code in has_dependent_entities: + raise OpenMLNotAuthorizedError( + message=( + f"The {resource_type} can not be deleted because " + f"it still has associated entities: {e.message}" + ), + ) from e + if e.code in unknown_reason: + raise OpenMLServerError( + message=( + f"The {resource_type} can not be deleted for unknown reason," + " please open an issue at: https://github.com/openml/openml/issues/new" + ), + ) from e + raise e def publish(self) -> None: pass class ResourceV2(ResourceAPI): - api_version: APIVersion | None = APIVersion.V2 + api_version: APIVersion = APIVersion.V2 - def delete(self) -> None: - self._raise_not_implemented_error("delete") + def delete(self, resource_id: int) -> bool: + raise NotImplementedError(self._get_not_implemented_message("publish")) def publish(self) -> None: - self._raise_not_implemented_error("publish") + raise NotImplementedError(self._get_not_implemented_message("publish")) From 1fe7e3ed8561945c20e8433603046a35484c37e7 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 27 Jan 2026 12:56:35 +0500 Subject: [PATCH 15/54] implement publish and minor refactoring --- openml/_api/clients/http.py | 2 - openml/_api/resources/base/base.py | 15 ++-- openml/_api/resources/base/versions.py | 113 ++++++++++++++++--------- 3 files changed, 82 insertions(+), 48 deletions(-) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index dc184074d..1622087c9 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -338,8 +338,6 @@ def request( timeout = request_kwargs.pop("timeout", self.timeout) files = request_kwargs.pop("files", None) - use_cache = False - if use_cache and self.cache is not None: cache_key = self.cache.get_key(url, params) try: diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index 9b1803508..f2d7d1e88 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -5,6 +5,9 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: + from collections.abc import Mapping + from typing import Any + from openml._api.clients import HTTPClient @@ -34,6 +37,12 @@ class ResourceAPI(ABC): def __init__(self, http: HTTPClient): self._http = http + @abstractmethod + def delete(self, resource_id: int) -> bool: ... + + @abstractmethod + def publish(self, path: str, files: Mapping[str, Any] | None) -> int: ... + def _get_not_implemented_message(self, method_name: str | None = None) -> str: version = getattr(self.api_version, "name", "Unknown version") resource = getattr(self.resource_type, "name", "Unknown resource") @@ -42,9 +51,3 @@ def _get_not_implemented_message(self, method_name: str | None = None) -> str: f"{self.__class__.__name__}: {version} API does not support this " f"functionality for resource: {resource}.{method_info}" ) - - @abstractmethod - def delete(self, resource_id: int) -> bool: ... - - @abstractmethod - def publish(self) -> None: ... 
diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index ce7b02057..41f883ebe 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -1,5 +1,8 @@ from __future__ import annotations +from collections.abc import Mapping +from typing import Any + import xmltodict from openml._api.resources.base import APIVersion, ResourceAPI, ResourceType @@ -13,6 +16,11 @@ class ResourceV1(ResourceAPI): api_version: APIVersion = APIVersion.V1 + def publish(self, path: str, files: Mapping[str, Any] | None) -> int: + response = self._http.post(path, files=files) + parsed_response = xmltodict.parse(response.content) + return self._extract_id_from_upload(parsed_response) + def delete(self, resource_id: int) -> bool: if self.resource_type == ResourceType.DATASET: resource_type = "data" @@ -30,54 +38,79 @@ def delete(self, resource_id: int) -> bool: if resource_type not in legal_resources: raise ValueError(f"Can't delete a {resource_type}") - url_suffix = f"{resource_type}/{resource_id}" + path = f"{resource_type}/{resource_id}" try: - response = self._http.delete(url_suffix) + response = self._http.delete(path) result = xmltodict.parse(response.content) return f"oml:{resource_type}_delete" in result except OpenMLServerException as e: - # https://github.com/openml/OpenML/blob/21f6188d08ac24fcd2df06ab94cf421c946971b0/openml_OS/views/pages/api_new/v1/xml/pre.php - # Most exceptions are descriptive enough to be raised as their standard - # OpenMLServerException, however there are two cases where we add information: - # - a generic "failed" message, we direct them to the right issue board - # - when the user successfully authenticates with the server, - # but user is not allowed to take the requested action, - # in which case we specify a OpenMLNotAuthorizedError. - by_other_user = [323, 353, 393, 453, 594] - has_dependent_entities = [324, 326, 327, 328, 354, 454, 464, 595] - unknown_reason = [325, 355, 394, 455, 593] - if e.code in by_other_user: - raise OpenMLNotAuthorizedError( - message=( - f"The {resource_type} can not be deleted " - "because it was not uploaded by you." - ), - ) from e - if e.code in has_dependent_entities: - raise OpenMLNotAuthorizedError( - message=( - f"The {resource_type} can not be deleted because " - f"it still has associated entities: {e.message}" - ), - ) from e - if e.code in unknown_reason: - raise OpenMLServerError( - message=( - f"The {resource_type} can not be deleted for unknown reason," - " please open an issue at: https://github.com/openml/openml/issues/new" - ), - ) from e - raise e - - def publish(self) -> None: - pass + self._handle_delete_exception(resource_type, e) + raise + + def _handle_delete_exception( + self, resource_type: str, exception: OpenMLServerException + ) -> None: + # https://github.com/openml/OpenML/blob/21f6188d08ac24fcd2df06ab94cf421c946971b0/openml_OS/views/pages/api_new/v1/xml/pre.php + # Most exceptions are descriptive enough to be raised as their standard + # OpenMLServerException, however there are two cases where we add information: + # - a generic "failed" message, we direct them to the right issue board + # - when the user successfully authenticates with the server, + # but user is not allowed to take the requested action, + # in which case we specify a OpenMLNotAuthorizedError. 
+ by_other_user = [323, 353, 393, 453, 594] + has_dependent_entities = [324, 326, 327, 328, 354, 454, 464, 595] + unknown_reason = [325, 355, 394, 455, 593] + if exception.code in by_other_user: + raise OpenMLNotAuthorizedError( + message=( + f"The {resource_type} can not be deleted because it was not uploaded by you." + ), + ) from exception + if exception.code in has_dependent_entities: + raise OpenMLNotAuthorizedError( + message=( + f"The {resource_type} can not be deleted because " + f"it still has associated entities: {exception.message}" + ), + ) from exception + if exception.code in unknown_reason: + raise OpenMLServerError( + message=( + f"The {resource_type} can not be deleted for unknown reason," + " please open an issue at: https://github.com/openml/openml/issues/new" + ), + ) from exception + raise exception + + def _extract_id_from_upload(self, parsed: Mapping[str, Any]) -> int: + # reads id from + # sample parsed dict: {"oml:openml": {"oml:upload_flow": {"oml:id": "42"}}} + + # xmltodict always gives exactly one root key + ((_, root_value),) = parsed.items() + + if not isinstance(root_value, Mapping): + raise ValueError("Unexpected XML structure") + + # upload node (e.g. oml:upload_task, oml:study_upload, ...) + ((_, upload_value),) = root_value.items() + + if not isinstance(upload_value, Mapping): + raise ValueError("Unexpected upload node structure") + + # ID is the only leaf value + for v in upload_value.values(): + if isinstance(v, (str, int)): + return int(v) + + raise ValueError("No ID found in upload response") class ResourceV2(ResourceAPI): api_version: APIVersion = APIVersion.V2 - def delete(self, resource_id: int) -> bool: + def publish(self, path: str, files: Mapping[str, Any] | None) -> int: raise NotImplementedError(self._get_not_implemented_message("publish")) - def publish(self) -> None: - raise NotImplementedError(self._get_not_implemented_message("publish")) + def delete(self, resource_id: int) -> bool: + raise NotImplementedError(self._get_not_implemented_message("delete")) From 54a3151932e3c50bda983f6d6609a4740e38a0c7 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 27 Jan 2026 14:17:40 +0500 Subject: [PATCH 16/54] implement tag/untag --- openml/_api/clients/http.py | 10 +++- openml/_api/resources/base/base.py | 6 +++ openml/_api/resources/base/versions.py | 63 ++++++++++++++++++++------ openml/_api/resources/tasks.py | 4 +- 4 files changed, 67 insertions(+), 16 deletions(-) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index 1622087c9..65d7b2248 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -279,6 +279,7 @@ def _request( # noqa: PLR0913 method: str, url: str, params: Mapping[str, Any], + data: Mapping[str, Any], headers: Mapping[str, str], timeout: float | int, files: Mapping[str, Any] | None, @@ -292,6 +293,7 @@ def _request( # noqa: PLR0913 method=method, url=url, params=params, + data=data, headers=headers, timeout=timeout, files=files, @@ -326,11 +328,16 @@ def request( url = urljoin(self.server, urljoin(self.base_url, path)) retries = max(1, self.retries) - # prepare params params = request_kwargs.pop("params", {}).copy() + data = request_kwargs.pop("data", {}).copy() + if use_api_key: params["api_key"] = self.api_key + if method.upper() in {"POST", "PUT", "PATCH"}: + data = {**params, **data} + params = {} + # prepare headers headers = request_kwargs.pop("headers", {}).copy() headers.update(self.headers) @@ -352,6 +359,7 @@ def request( method=method, url=url, params=params, + data=data, 
headers=headers, timeout=timeout, files=files, diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index f2d7d1e88..63d4c40eb 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -43,6 +43,12 @@ def delete(self, resource_id: int) -> bool: ... @abstractmethod def publish(self, path: str, files: Mapping[str, Any] | None) -> int: ... + @abstractmethod + def tag(self, resource_id: int, tag: str) -> list[str]: ... + + @abstractmethod + def untag(self, resource_id: int, tag: str) -> list[str]: ... + def _get_not_implemented_message(self, method_name: str | None = None) -> str: version = getattr(self.api_version, "name", "Unknown version") resource = getattr(self.resource_type, "name", "Unknown resource") diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 41f883ebe..91c1a8c06 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -22,19 +22,9 @@ def publish(self, path: str, files: Mapping[str, Any] | None) -> int: return self._extract_id_from_upload(parsed_response) def delete(self, resource_id: int) -> bool: - if self.resource_type == ResourceType.DATASET: - resource_type = "data" - else: - resource_type = self.resource_type.name - - legal_resources = { - "data", - "flow", - "task", - "run", - "study", - "user", - } + resource_type = self._get_endpoint_name() + + legal_resources = {"data", "flow", "task", "run", "study", "user"} if resource_type not in legal_resources: raise ValueError(f"Can't delete a {resource_type}") @@ -47,6 +37,47 @@ def delete(self, resource_id: int) -> bool: self._handle_delete_exception(resource_type, e) raise + def tag(self, resource_id: int, tag: str) -> list[str]: + resource_type = self._get_endpoint_name() + + legal_resources = {"data", "task", "flow", "setup", "run"} + if resource_type not in legal_resources: + raise ValueError(f"Can't tag a {resource_type}") + + path = f"{resource_type}/tag" + data = {f"{resource_type}_id": resource_id, "tag": tag} + response = self._http.post(path, data=data) + + main_tag = f"oml:{resource_type}_tag" + parsed_response = xmltodict.parse(response.content, force_list={"oml:tag"}) + result = parsed_response[main_tag] + tags: list[str] = result.get("oml:tag", []) + + return tags + + def untag(self, resource_id: int, tag: str) -> list[str]: + resource_type = self._get_endpoint_name() + + legal_resources = {"data", "task", "flow", "setup", "run"} + if resource_type not in legal_resources: + raise ValueError(f"Can't tag a {resource_type}") + + path = f"{resource_type}/untag" + data = {f"{resource_type}_id": resource_id, "tag": tag} + response = self._http.post(path, data=data) + + main_tag = f"oml:{resource_type}_untag" + parsed_response = xmltodict.parse(response.content, force_list={"oml:tag"}) + result = parsed_response[main_tag] + tags: list[str] = result.get("oml:tag", []) + + return tags + + def _get_endpoint_name(self) -> str: + if self.resource_type == ResourceType.DATASET: + return "data" + return self.resource_type.name + def _handle_delete_exception( self, resource_type: str, exception: OpenMLServerException ) -> None: @@ -114,3 +145,9 @@ def publish(self, path: str, files: Mapping[str, Any] | None) -> int: def delete(self, resource_id: int) -> bool: raise NotImplementedError(self._get_not_implemented_message("delete")) + + def tag(self, resource_id: int, tag: str) -> list[str]: + raise NotImplementedError(self._get_not_implemented_message("untag")) + + def 
untag(self, resource_id: int, tag: str) -> list[str]: + raise NotImplementedError(self._get_not_implemented_message("untag")) diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py index a7ca39208..295e7a73d 100644 --- a/openml/_api/resources/tasks.py +++ b/openml/_api/resources/tasks.py @@ -26,7 +26,7 @@ def get( return_response: bool = False, ) -> OpenMLTask | tuple[OpenMLTask, Response]: path = f"task/{task_id}" - response = self._http.get(path) + response = self._http.get(path, use_cache=True) xml_content = response.text task = self._create_task_from_xml(xml_content) @@ -125,4 +125,4 @@ def get( *, return_response: bool = False, ) -> OpenMLTask | tuple[OpenMLTask, Response]: - raise NotImplementedError + raise NotImplementedError(self._get_not_implemented_message("get")) From 2b6fe6507b349703060f060f0184169abf5e20de Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 27 Jan 2026 18:31:39 +0500 Subject: [PATCH 17/54] implement fallback --- openml/_api/resources/__init__.py | 3 +- openml/_api/resources/base/__init__.py | 2 + openml/_api/resources/base/fallback.py | 56 ++++++++++++++++++++++++++ openml/_api/runtime/core.py | 8 +++- openml/_api/runtime/fallback.py | 12 ------ 5 files changed, 66 insertions(+), 15 deletions(-) create mode 100644 openml/_api/resources/base/fallback.py delete mode 100644 openml/_api/runtime/fallback.py diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py index b1af3c1a8..6c0807e0f 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,4 +1,5 @@ +from openml._api.resources.base.fallback import FallbackProxy from openml._api.resources.datasets import DatasetsV1, DatasetsV2 from openml._api.resources.tasks import TasksV1, TasksV2 -__all__ = ["DatasetsV1", "DatasetsV2", "TasksV1", "TasksV2"] +__all__ = ["DatasetsV1", "DatasetsV2", "FallbackProxy", "TasksV1", "TasksV2"] diff --git a/openml/_api/resources/base/__init__.py b/openml/_api/resources/base/__init__.py index 851cfe942..bddc09b21 100644 --- a/openml/_api/resources/base/__init__.py +++ b/openml/_api/resources/base/__init__.py @@ -1,10 +1,12 @@ from openml._api.resources.base.base import APIVersion, ResourceAPI, ResourceType +from openml._api.resources.base.fallback import FallbackProxy from openml._api.resources.base.resources import DatasetsAPI, TasksAPI from openml._api.resources.base.versions import ResourceV1, ResourceV2 __all__ = [ "APIVersion", "DatasetsAPI", + "FallbackProxy", "ResourceAPI", "ResourceType", "ResourceV1", diff --git a/openml/_api/resources/base/fallback.py b/openml/_api/resources/base/fallback.py new file mode 100644 index 000000000..253ee3865 --- /dev/null +++ b/openml/_api/resources/base/fallback.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from collections.abc import Callable +from typing import Any + + +class FallbackProxy: + def __init__(self, *api_versions: Any): + if not api_versions: + raise ValueError("At least one API version must be provided") + self._apis = api_versions + + def __getattr__(self, name: str) -> Any: + api, attr = self._find_attr(name) + if callable(attr): + return self._wrap_callable(name, api, attr) + return attr + + def _find_attr(self, name: str) -> tuple[Any, Any]: + for api in self._apis: + attr = getattr(api, name, None) + if attr is not None: + return api, attr + raise AttributeError(f"{self.__class__.__name__} has no attribute {name}") + + def _wrap_callable( + self, + name: str, + primary_api: Any, + primary_attr: Callable[..., Any], + ) -> 
Callable[..., Any]: + def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return primary_attr(*args, **kwargs) + except NotImplementedError: + return self._call_fallbacks(name, primary_api, *args, **kwargs) + + return wrapper + + def _call_fallbacks( + self, + name: str, + skip_api: Any, + *args: Any, + **kwargs: Any, + ) -> Any: + for api in self._apis: + if api is skip_api: + continue + attr = getattr(api, name, None) + if callable(attr): + try: + return attr(*args, **kwargs) + except NotImplementedError: + continue + raise NotImplementedError(f"Could not fallback to any API for method: {name}") diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 25f2649ee..4914179f8 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -8,6 +8,7 @@ from openml._api.resources import ( DatasetsV1, DatasetsV2, + FallbackProxy, TasksV1, TasksV2, ) @@ -17,7 +18,7 @@ class APIBackend: - def __init__(self, *, datasets: DatasetsAPI, tasks: TasksAPI): + def __init__(self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy): self.datasets = datasets self.tasks = tasks @@ -62,7 +63,10 @@ def build_backend(version: str, *, strict: bool) -> APIBackend: if strict: return v2 - return v1 + return APIBackend( + datasets=FallbackProxy(DatasetsV2(v2_http_client), DatasetsV1(v1_http_client)), + tasks=FallbackProxy(TasksV2(v2_http_client), TasksV1(v1_http_client)), + ) class APIContext: diff --git a/openml/_api/runtime/fallback.py b/openml/_api/runtime/fallback.py deleted file mode 100644 index 1bc99d270..000000000 --- a/openml/_api/runtime/fallback.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from openml._api.resources.base import ResourceAPI - - -class FallbackProxy: - def __init__(self, primary: ResourceAPI, fallback: ResourceAPI): - self._primary = primary - self._fallback = fallback From fa53f8d3e10dabde3634c05a97d67560459bcaa6 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 28 Jan 2026 13:50:42 +0500 Subject: [PATCH 18/54] add test_http.py --- openml/testing.py | 88 +++++++++++++++++++++++ tests/test_api/test_http.py | 134 ++++++++++++++++++++++++++++++++++++ 2 files changed, 222 insertions(+) create mode 100644 tests/test_api/test_http.py diff --git a/openml/testing.py b/openml/testing.py index 8d3bbbd5b..b0aaac9be 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -11,10 +11,13 @@ import unittest from pathlib import Path from typing import ClassVar +from urllib.parse import urljoin import requests import openml +from openml._api.clients import HTTPCache, HTTPClient +from openml._api.config import RetryPolicy from openml.exceptions import OpenMLServerException from openml.tasks import TaskType @@ -276,6 +279,91 @@ def _check_fold_timing_evaluations( # noqa: PLR0913 assert evaluation <= max_val +class TestAPIBase(unittest.TestCase): + server: str + base_url: str + api_key: str + timeout: int + retries: int + retry_policy: RetryPolicy + dir: str + ttl: int + cache: HTTPCache + http_client: HTTPClient + + def setUp(self) -> None: + self.server = "https://test.openml.org/" + self.base_url = "api/v1/xml" + self.api_key = "normaluser" + self.timeout = 10 + self.retries = 3 + self.retry_policy = RetryPolicy.HUMAN + self.dir = "test_cache" + self.ttl = 60 * 60 * 24 * 7 + + self.cache = self._get_http_cache( + path=Path(self.dir), + ttl=self.ttl, + ) + self.http_client = self._get_http_client( + server=self.server, + base_url=self.base_url, + 
api_key=self.api_key, + timeout=self.timeout, + retries=self.retries, + retry_policy=self.retry_policy, + cache=self.cache, + ) + + if self.cache.path.exists(): + shutil.rmtree(self.cache.path) + + def tearDown(self) -> None: + if self.cache.path.exists(): + shutil.rmtree(self.cache.path) + + def _get_http_cache( + self, + path: Path, + ttl: int, + ) -> HTTPCache: + return HTTPCache( + path=path, + ttl=ttl, + ) + + def _get_http_client( # noqa: PLR0913 + self, + server: str, + base_url: str, + api_key: str, + timeout: int, + retries: int, + retry_policy: RetryPolicy, + cache: HTTPCache | None = None, + ) -> HTTPClient: + return HTTPClient( + server=server, + base_url=base_url, + api_key=api_key, + timeout=timeout, + retries=retries, + retry_policy=retry_policy, + cache=cache, + ) + + def _get_url( + self, + server: str | None = None, + base_url: str | None = None, + path: str | None = None, + ) -> str: + server = server if server else self.server + base_url = base_url if base_url else self.base_url + path = path if path else "" + return urljoin(self.server, urljoin(self.base_url, path)) + + def check_task_existence( task_type: TaskType, dataset_id: int, diff --git a/tests/test_api/test_http.py b/tests/test_api/test_http.py new file mode 100644 index 000000000..98b6fda5a --- /dev/null +++ b/tests/test_api/test_http.py @@ -0,0 +1,134 @@ +from requests import Response, Request +import time +import xmltodict +from openml.testing import TestAPIBase + + +class TestHTTPClient(TestAPIBase): + def test_cache(self): + url = self._get_url(path="task/31") + params = {"param1": "value1", "param2": "value2"} + + key = self.cache.get_key(url, params) + + # validate key + self.assertEqual( + key, + "org/openml/test/api/v1/task/31/param1=value1¶m2=value2", + ) + + # create fake response + req = Request("GET", url).prepare() + response = Response() + response.status_code = 200 + response.url = url + response.reason = "OK" + response._content = b"test" + response.headers = {"Content-Type": "text/xml"} + response.encoding = "utf-8" + response.request = req + response.elapsed = type("Elapsed", (), {"total_seconds": lambda self: 0.1})() + + # save to cache + self.cache.save(key, response) + + # load from cache + cached_response = self.cache.load(key) + + # validate loaded response + self.assertEqual(cached_response.status_code, 200) + self.assertEqual(cached_response.url, url) + self.assertEqual(cached_response.content, b"test") + self.assertEqual( + cached_response.headers["Content-Type"], "text/xml" + ) + + def test_get(self): + response = self.http_client.get("task/1") + + self.assertEqual(response.status_code, 200) + self.assertIn(b" new request + self.assertNotEqual(response1_cache_time_stamp, response2_cache_time_stamp) + self.assertEqual(response2.status_code, 200) + self.assertEqual(response1.content, response2.content) + + def test_post_and_delete(self): + task_xml = """ + + 5 + 193 + 17 + + """ + + task_id = None + try: + # POST the task + post_response = self.http_client.post( + "task", + files={"description": task_xml}, + ) + self.assertEqual(post_response.status_code, 200) + xml_resp = xmltodict.parse(post_response.content) + task_id = int(xml_resp["oml:upload_task"]["oml:id"]) + + # GET the task to verify it exists + get_response = self.http_client.get(f"task/{task_id}") + self.assertEqual(get_response.status_code, 200) + + finally: + # DELETE the task if it was created + if task_id is not None: + try: + del_response = self.http_client.delete(f"task/{task_id}") + # optional: verify delete + if 
del_response.status_code != 200: + print(f"Warning: delete failed for task {task_id}") + except Exception as e: + print(f"Warning: failed to delete task {task_id}: {e}") From 2b2db962fc252a2b2b23f21bd1d055905ed74588 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Wed, 28 Jan 2026 13:52:43 +0500 Subject: [PATCH 19/54] add uses_test_server marker --- tests/test_api/test_http.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_api/test_http.py b/tests/test_api/test_http.py index 98b6fda5a..94ce5ee93 100644 --- a/tests/test_api/test_http.py +++ b/tests/test_api/test_http.py @@ -1,6 +1,7 @@ from requests import Response, Request import time import xmltodict +import pytest from openml.testing import TestAPIBase @@ -43,12 +44,14 @@ def test_cache(self): cached_response.headers["Content-Type"], "text/xml" ) + @pytest.mark.uses_test_server() def test_get(self): response = self.http_client.get("task/1") self.assertEqual(response.status_code, 200) self.assertIn(b" From c9617f932fce853dbe6db9a445ef98cc6cfec7f4 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Thu, 29 Jan 2026 14:40:09 +0500 Subject: [PATCH 20/54] implement reset_cache --- openml/_api/clients/http.py | 6 +++++- tests/test_api/test_http.py | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index 65d7b2248..dfcdf5a8a 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -322,6 +322,7 @@ def request( path: str, *, use_cache: bool = False, + reset_cache: bool = False, use_api_key: bool = False, **request_kwargs: Any, ) -> Response: @@ -345,7 +346,7 @@ def request( timeout = request_kwargs.pop("timeout", self.timeout) files = request_kwargs.pop("files", None) - if use_cache and self.cache is not None: + if use_cache and not reset_cache and self.cache is not None: cache_key = self.cache.get_key(url, params) try: return self.cache.load(cache_key) @@ -379,6 +380,7 @@ def request( assert response is not None if use_cache and self.cache is not None: + cache_key = self.cache.get_key(url, params) self.cache.save(cache_key, response) return response @@ -388,6 +390,7 @@ def get( path: str, *, use_cache: bool = False, + reset_cache: bool = False, use_api_key: bool = False, **request_kwargs: Any, ) -> Response: @@ -395,6 +398,7 @@ def get( method="GET", path=path, use_cache=use_cache, + reset_cache=reset_cache, use_api_key=use_api_key, **request_kwargs, ) diff --git a/tests/test_api/test_http.py b/tests/test_api/test_http.py index 94ce5ee93..808321862 100644 --- a/tests/test_api/test_http.py +++ b/tests/test_api/test_http.py @@ -103,6 +103,24 @@ def test_get_cache_expires(self): self.assertEqual(response2.status_code, 200) self.assertEqual(response1.content, response2.content) + @pytest.mark.uses_test_server() + def test_get_reset_cache(self): + path = "task/1" + + url = self._get_url(path=path) + key = self.cache.get_key(url, {}) + cache_path = self.cache._key_to_path(key) / "meta.json" + + response1 = self.http_client.get(path, use_cache=True) + response1_cache_time_stamp = cache_path.stat().st_ctime + + response2 = self.http_client.get(path, use_cache=True, reset_cache=True) + response2_cache_time_stamp = cache_path.stat().st_ctime + + self.assertNotEqual(response1_cache_time_stamp, response2_cache_time_stamp) + self.assertEqual(response2.status_code, 200) + self.assertEqual(response1.content, response2.content) + @pytest.mark.uses_test_server() def test_post_and_delete(self): task_xml = """ From 
5bc37b80abc86e89644e431f48ca2d4d4ad7814c Mon Sep 17 00:00:00 2001 From: geetu040 Date: Thu, 29 Jan 2026 22:02:38 +0500 Subject: [PATCH 21/54] fixes with publish/delete --- openml/_api/resources/base/versions.py | 22 ++++++------- tests/test_api/test_http.py | 9 ++---- tests/test_api/test_versions.py | 44 ++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 18 deletions(-) create mode 100644 tests/test_api/test_versions.py diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 91c1a8c06..6ca2dd345 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -1,7 +1,7 @@ from __future__ import annotations from collections.abc import Mapping -from typing import Any +from typing import Any, cast import xmltodict @@ -76,7 +76,7 @@ def untag(self, resource_id: int, tag: str) -> list[str]: def _get_endpoint_name(self) -> str: if self.resource_type == ResourceType.DATASET: return "data" - return self.resource_type.name + return cast("str", self.resource_type.value) def _handle_delete_exception( self, resource_type: str, exception: OpenMLServerException @@ -114,8 +114,8 @@ def _handle_delete_exception( raise exception def _extract_id_from_upload(self, parsed: Mapping[str, Any]) -> int: - # reads id from - # sample parsed dict: {"oml:openml": {"oml:upload_flow": {"oml:id": "42"}}} + # reads id from upload response + # actual parsed dict: {"oml:upload_flow": {"@xmlns:oml": "...", "oml:id": "42"}} # xmltodict always gives exactly one root key ((_, root_value),) = parsed.items() @@ -123,14 +123,14 @@ def _extract_id_from_upload(self, parsed: Mapping[str, Any]) -> int: if not isinstance(root_value, Mapping): raise ValueError("Unexpected XML structure") - # upload node (e.g. oml:upload_task, oml:study_upload, ...) 
- ((_, upload_value),) = root_value.items() + # Look for oml:id directly in the root value + if "oml:id" in root_value: + id_value = root_value["oml:id"] + if isinstance(id_value, (str, int)): + return int(id_value) - if not isinstance(upload_value, Mapping): - raise ValueError("Unexpected upload node structure") - - # ID is the only leaf value - for v in upload_value.values(): + # Fallback: check all values for numeric/string IDs + for v in root_value.values(): if isinstance(v, (str, int)): return int(v) diff --git a/tests/test_api/test_http.py b/tests/test_api/test_http.py index 808321862..c16759558 100644 --- a/tests/test_api/test_http.py +++ b/tests/test_api/test_http.py @@ -149,10 +149,5 @@ def test_post_and_delete(self): finally: # DELETE the task if it was created if task_id is not None: - try: - del_response = self.http_client.delete(f"task/{task_id}") - # optional: verify delete - if del_response.status_code != 200: - print(f"Warning: delete failed for task {task_id}") - except Exception as e: - print(f"Warning: failed to delete task {task_id}: {e}") + del_response = self.http_client.delete(f"task/{task_id}") + self.assertEqual(del_response.status_code, 200) diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py new file mode 100644 index 000000000..d3b1cd45d --- /dev/null +++ b/tests/test_api/test_versions.py @@ -0,0 +1,44 @@ +import pytest +from openml.testing import TestAPIBase +from openml._api.resources.base.versions import ResourceV1 +from openml._api.resources.base.resources import ResourceType + + +class TestResourceV1(TestAPIBase): + def setUp(self): + super().setUp() + self.resource = ResourceV1(self.http_client) + self.resource.resource_type = ResourceType.TASK + + @pytest.mark.uses_test_server() + def test_publish_and_delete(self): + task_xml = """ + + 5 + 193 + 17 + + """ + + task_id = None + try: + # Publish the task + task_id = self.resource.publish( + "task", + files={"description": task_xml}, + ) + + # Get the task to verify it exists + get_response = self.http_client.get(f"task/{task_id}") + self.assertEqual(get_response.status_code, 200) + + finally: + # delete the task if it was created + if task_id is not None: + success = self.resource.delete(task_id) + self.assertTrue(success) + + + @pytest.mark.uses_test_server() + def test_tag_and_untag(self): + pass From 08d991686843fc2ff5d8182e96a162bc2e706f52 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Thu, 29 Jan 2026 22:05:24 +0500 Subject: [PATCH 22/54] fix cache_key in tests --- tests/test_api/test_http.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/test_api/test_http.py b/tests/test_api/test_http.py index c16759558..efaeaeeef 100644 --- a/tests/test_api/test_http.py +++ b/tests/test_api/test_http.py @@ -3,6 +3,7 @@ import xmltodict import pytest from openml.testing import TestAPIBase +import os class TestHTTPClient(TestAPIBase): @@ -11,12 +12,19 @@ def test_cache(self): params = {"param1": "value1", "param2": "value2"} key = self.cache.get_key(url, params) + expected_key = os.path.join( + "org", + "openml", + "test", + "api", + "v1", + "task", + "31", + "param1=value1¶m2=value2", + ) # validate key - self.assertEqual( - key, - "org/openml/test/api/v1/task/31/param1=value1¶m2=value2", - ) + self.assertEqual(key, expected_key) # create fake response req = Request("GET", url).prepare() From 8caba11111d93fd438915e3f697a634d362eba1f Mon Sep 17 00:00:00 2001 From: geetu040 Date: Fri, 30 Jan 2026 11:47:41 +0500 Subject: [PATCH 23/54] update 
_not_supported --- openml/_api/resources/base/base.py | 19 +++++++++++-------- openml/_api/resources/base/fallback.py | 8 +++++--- openml/_api/resources/base/versions.py | 16 ++++++++-------- openml/_api/resources/tasks.py | 6 +++--- openml/exceptions.py | 4 ++++ 5 files changed, 31 insertions(+), 22 deletions(-) diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index 63d4c40eb..38ceccbac 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -2,7 +2,9 @@ from abc import ABC, abstractmethod from enum import Enum -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, NoReturn + +from openml.exceptions import OpenMLNotSupportedError if TYPE_CHECKING: from collections.abc import Mapping @@ -49,11 +51,12 @@ def tag(self, resource_id: int, tag: str) -> list[str]: ... @abstractmethod def untag(self, resource_id: int, tag: str) -> list[str]: ... - def _get_not_implemented_message(self, method_name: str | None = None) -> str: - version = getattr(self.api_version, "name", "Unknown version") - resource = getattr(self.resource_type, "name", "Unknown resource") - method_info = f" Method: {method_name}" if method_name else "" - return ( - f"{self.__class__.__name__}: {version} API does not support this " - f"functionality for resource: {resource}.{method_info}" + def _not_supported(self, *, method: str) -> NoReturn: + version = getattr(self.api_version, "value", "unknown") + resource = getattr(self.resource_type, "value", "unknown") + + raise OpenMLNotSupportedError( + f"{self.__class__.__name__}: " + f"{version} API does not support `{method}` " + f"for resource `{resource}`" ) diff --git a/openml/_api/resources/base/fallback.py b/openml/_api/resources/base/fallback.py index 253ee3865..3919c36a9 100644 --- a/openml/_api/resources/base/fallback.py +++ b/openml/_api/resources/base/fallback.py @@ -3,6 +3,8 @@ from collections.abc import Callable from typing import Any +from openml.exceptions import OpenMLNotSupportedError + class FallbackProxy: def __init__(self, *api_versions: Any): @@ -32,7 +34,7 @@ def _wrap_callable( def wrapper(*args: Any, **kwargs: Any) -> Any: try: return primary_attr(*args, **kwargs) - except NotImplementedError: + except OpenMLNotSupportedError: return self._call_fallbacks(name, primary_api, *args, **kwargs) return wrapper @@ -51,6 +53,6 @@ def _call_fallbacks( if callable(attr): try: return attr(*args, **kwargs) - except NotImplementedError: + except OpenMLNotSupportedError: continue - raise NotImplementedError(f"Could not fallback to any API for method: {name}") + raise OpenMLNotSupportedError(f"Could not fallback to any API for method: {name}") diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 6ca2dd345..04b7617b1 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -140,14 +140,14 @@ def _extract_id_from_upload(self, parsed: Mapping[str, Any]) -> int: class ResourceV2(ResourceAPI): api_version: APIVersion = APIVersion.V2 - def publish(self, path: str, files: Mapping[str, Any] | None) -> int: - raise NotImplementedError(self._get_not_implemented_message("publish")) + def publish(self, path: str, files: Mapping[str, Any] | None) -> int: # noqa: ARG002 + self._not_supported(method="publish") - def delete(self, resource_id: int) -> bool: - raise NotImplementedError(self._get_not_implemented_message("delete")) + def delete(self, resource_id: int) -> bool: # noqa: ARG002 + 
self._not_supported(method="delete") - def tag(self, resource_id: int, tag: str) -> list[str]: - raise NotImplementedError(self._get_not_implemented_message("untag")) + def tag(self, resource_id: int, tag: str) -> list[str]: # noqa: ARG002 + self._not_supported(method="tag") - def untag(self, resource_id: int, tag: str) -> list[str]: - raise NotImplementedError(self._get_not_implemented_message("untag")) + def untag(self, resource_id: int, tag: str) -> list[str]: # noqa: ARG002 + self._not_supported(method="untag") diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py index 295e7a73d..8420f8e57 100644 --- a/openml/_api/resources/tasks.py +++ b/openml/_api/resources/tasks.py @@ -121,8 +121,8 @@ def _create_task_from_xml(self, xml: str) -> OpenMLTask: class TasksV2(ResourceV2, TasksAPI): def get( self, - task_id: int, + task_id: int, # noqa: ARG002 *, - return_response: bool = False, + return_response: bool = False, # noqa: ARG002 ) -> OpenMLTask | tuple[OpenMLTask, Response]: - raise NotImplementedError(self._get_not_implemented_message("get")) + self._not_supported(method="get") diff --git a/openml/exceptions.py b/openml/exceptions.py index fe63b8a58..26c2d2591 100644 --- a/openml/exceptions.py +++ b/openml/exceptions.py @@ -65,3 +65,7 @@ class OpenMLNotAuthorizedError(OpenMLServerError): class ObjectNotPublishedError(PyOpenMLError): """Indicates an object has not been published yet.""" + + +class OpenMLNotSupportedError(PyOpenMLError): + """Raised when an API operation is not supported for a resource/version.""" From 1913c10416b74421709601d5177c1e67db93a401 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:27:36 +0100 Subject: [PATCH 24/54] add 'get_api_config' skeleton method --- openml/_api/config.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/openml/_api/config.py b/openml/_api/config.py index 6cce06403..2201420d9 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -41,6 +41,9 @@ class Settings: connection: ConnectionConfig cache: CacheConfig + def get_api_config(self, version: str) -> APIConfig: + pass + settings = Settings( api=APISettings( From 7681949675f3c72e09d09d810aaa11acd78c6811 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:29:13 +0100 Subject: [PATCH 25/54] remove 'APISettings' --- openml/_api/config.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 2201420d9..893b950c6 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -17,12 +17,6 @@ class APIConfig: timeout: int = 10 # seconds -@dataclass -class APISettings: - v1: APIConfig - v2: APIConfig - - @dataclass class ConnectionConfig: retries: int = 3 From 01840a5a09442228f708daf45c32acbd05ce0e8b Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:34:11 +0100 Subject: [PATCH 26/54] impl. 
'get_api_config' --- openml/_api/config.py | 54 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 5 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 893b950c6..8600156f7 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -29,14 +29,58 @@ class CacheConfig: ttl: int = 60 * 60 * 24 * 7 # one week -@dataclass class Settings: - api: APISettings - connection: ConnectionConfig - cache: CacheConfig + def __init__(self) -> None: + self.api_configs: dict[str, APIConfig] = {} + self.connection = ConnectionConfig() + self.cache = CacheConfig() + self._initialized = False def get_api_config(self, version: str) -> APIConfig: - pass + """Get API config for a version, with lazy initialization from openml.config.""" + if not self._initialized: + self._init_from_legacy_config() + if version not in self.api_configs: + raise NotImplementedError( + f"API {version} is not yet available. " + f"Supported versions: {list(self.api_configs.keys())}" + ) + return self.api_configs[version] + + def _init_from_legacy_config(self) -> None: + """Lazy init from openml.config to avoid circular imports.""" + if self._initialized: + return + + # Import here to avoid circular import at module load time + import openml.config as legacy + + # Parse server URL to extract base components + # e.g., "https://www.openml.org/api/v1/xml" -> server="https://www.openml.org/" + server_url = legacy.server + if "/api" in server_url: + server_base = server_url.rsplit("/api", 1)[0] + "/" + else: + server_base = server_url + + self.api_configs["v1"] = APIConfig( + server=server_base, + base_url="api/v1/xml/", + api_key=legacy.apikey, + ) + + # Sync connection settings from legacy config + self.connection = ConnectionConfig( + retries=legacy.connection_n_retries, + retry_policy=RetryPolicy(legacy.retry_policy), + ) + + # Sync cache settings from legacy config + self.cache = CacheConfig( + dir=str(legacy._root_cache_directory), + ) + + self._initialized = True settings = Settings( From 26ed4c1ee0ab9571f74726795e050b7d47110227 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:39:43 +0100 Subject: [PATCH 27/54] add singleton pattern for settings --- openml/_api/config.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/openml/_api/config.py b/openml/_api/config.py index 8600156f7..ee3240556 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -99,3 +99,18 @@ def _init_from_legacy_config(self) -> None: connection=ConnectionConfig(), cache=CacheConfig(), ) + + +_settings = None + + +def get_settings() -> Settings: + """Get settings singleton, creating on first access. + + Settings are lazily initialized from openml.config when first accessed, + avoiding circular imports at module load time. + """ + global _settings + if _settings is None: + _settings = Settings() + return _settings From c588d0cd456233894fa67a56e7a814c36ca25761 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:40:19 +0100 Subject: [PATCH 28/54] add 'reset_settings' --- openml/_api/config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/openml/_api/config.py b/openml/_api/config.py index ee3240556..5670698c8 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -114,3 +114,9 @@ def get_settings() -> Settings: if _settings is None: _settings = Settings() return _settings + + +def reset_settings() -> None: + """Reset the settings singleton. 
Could be useful for testing.""" + global _settings + _settings = None From b6ff7207c5d8428c885f498986d2a5abf0d66ac3 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:40:32 +0100 Subject: [PATCH 29/54] remove unused code --- openml/_api/config.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 5670698c8..4dc408428 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -83,24 +83,6 @@ def _init_from_legacy_config(self) -> None: self._initialized = True -settings = Settings( - api=APISettings( - v1=APIConfig( - server="https://www.openml.org/", - base_url="api/v1/xml/", - api_key="...", - ), - v2=APIConfig( - server="http://127.0.0.1:8001/", - base_url="", - api_key="...", - ), - ), - connection=ConnectionConfig(), - cache=CacheConfig(), -) - - _settings = None From 80d5afc1e0784abe264b10abaabe40fec7984792 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:44:44 +0100 Subject: [PATCH 30/54] reimplement usage of v1 settings config --- openml/_api/runtime/core.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 4914179f8..5e55d61cb 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING from openml._api.clients import HTTPCache, HTTPClient -from openml._api.config import settings +from openml._api.config import get_settings from openml._api.resources import ( DatasetsV1, DatasetsV2, @@ -18,30 +18,29 @@ class APIBackend: - def __init__(self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy): + def __init__( + self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy + ): self.datasets = datasets self.tasks = tasks def build_backend(version: str, *, strict: bool) -> APIBackend: + settings = get_settings() + + # Get config for v1 (lazy init from openml.config) + v1_config = settings.get_api_config("v1") + http_cache = HTTPCache( - path=Path(settings.cache.dir), + path=Path(settings.cache.dir).expanduser(), ttl=settings.cache.ttl, ) + v1_http_client = HTTPClient( - server=settings.api.v1.server, - base_url=settings.api.v1.base_url, - api_key=settings.api.v1.api_key, - timeout=settings.api.v1.timeout, - retries=settings.connection.retries, - retry_policy=settings.connection.retry_policy, - cache=http_cache, - ) - v2_http_client = HTTPClient( - server=settings.api.v2.server, - base_url=settings.api.v2.base_url, - api_key=settings.api.v2.api_key, - timeout=settings.api.v2.timeout, + server=v1_config.server, + base_url=v1_config.base_url, + api_key=v1_config.api_key, + timeout=v1_config.timeout, retries=settings.connection.retries, retry_policy=settings.connection.retry_policy, cache=http_cache, From f47112c7b9eb1710ddf7b79ea97b3f8c0b0cbf49 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:47:25 +0100 Subject: [PATCH 31/54] first try v2, fallback to v1 if not available --- openml/_api/runtime/core.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 5e55d61cb..24fd2c248 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -54,6 +54,25 @@ def build_backend(version: str, *, strict: bool) -> APIBackend: if version == "v1": return v1 + # V2 support - will raise NotImplementedError if v2 config not available + try: + v2_config = 
settings.get_api_config("v2") + except NotImplementedError: + if strict: + raise + # Non-strict mode: fall back to v1 only + return v1 + + v2_http_client = HTTPClient( + server=v2_config.server, + base_url=v2_config.base_url, + api_key=v2_config.api_key, + timeout=v2_config.timeout, + retries=settings.connection.retries, + retry_policy=settings.connection.retry_policy, + cache=http_cache, + ) + v2 = APIBackend( datasets=DatasetsV2(v2_http_client), tasks=TasksV2(v2_http_client), From d44cf3eb5e36587ad033e24b1e54863e98df2d91 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 19:58:42 +0100 Subject: [PATCH 32/54] reimplement singelton without the use of 'global' --- openml/_api/config.py | 46 +++++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 4dc408428..c375542b8 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -30,12 +30,28 @@ class CacheConfig: class Settings: + """Settings container that reads from openml.config on access.""" + + _instance: Settings | None = None + def __init__(self) -> None: self.api_configs: dict[str, APIConfig] = {} self.connection = ConnectionConfig() self.cache = CacheConfig() self._initialized = False + @classmethod + def get(cls) -> Settings: + """Get settings singleton, creating on first access.""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + @classmethod + def reset(cls) -> None: + """Reset the settings singleton. Useful for testing.""" + cls._instance = None + def get_api_config(self, version: str) -> APIConfig: """Get API config for a version, with lazy initialization from openml.config.""" if not self._initialized: @@ -52,11 +68,8 @@ def _init_from_legacy_config(self) -> None: if self._initialized: return - # Import here to avoid circular import at module load time - import openml.config as legacy + import openml.config as legacy # Import here to avoid circular - # Parse server URL to extract base components - # e.g., "https://www.openml.org/api/v1/xml" -> server="https://www.openml.org/" server_url = legacy.server if "/api" in server_url: server_base = server_url.rsplit("/api", 1)[0] + "/" @@ -69,36 +82,13 @@ def _init_from_legacy_config(self) -> None: api_key=legacy.apikey, ) - # Sync connection settings from legacy config + # Sync connection- and cache- settings from legacy config self.connection = ConnectionConfig( retries=legacy.connection_n_retries, retry_policy=RetryPolicy(legacy.retry_policy), ) - - # Sync cache settings from legacy config self.cache = CacheConfig( dir=str(legacy._root_cache_directory), ) self._initialized = True - - -_settings = None - - -def get_settings() -> Settings: - """Get settings singleton, creating on first access. - - Settings are lazily initialized from openml.config when first accessed, - avoiding circular imports at module load time. - """ - global _settings - if _settings is None: - _settings = Settings() - return _settings - - -def reset_settings() -> None: - """Reset the settings singleton. 
Could be useful for testing.""" - global _settings - _settings = None From ea7dda17087bc25d07ea7610da25b8ec04b17ca2 Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 20:00:25 +0100 Subject: [PATCH 33/54] add explanations --- openml/_api/config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index c375542b8..32dd8ecf5 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -68,7 +68,11 @@ def _init_from_legacy_config(self) -> None: if self._initialized: return - import openml.config as legacy # Import here to avoid circular + # Import here (not at module level) to avoid circular imports. + # We read from openml.config to integrate with the existing config system + # where users set their API key, server, cache directory, etc. + # This avoids duplicating those settings with hardcoded values. + import openml.config as legacy server_url = legacy.server if "/api" in server_url: From f0e594784b446006e401ab4aa1d7113344b6dd0e Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 20:01:16 +0100 Subject: [PATCH 34/54] change usage of settings to new impl. --- openml/_api/runtime/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 24fd2c248..9207fc31d 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING from openml._api.clients import HTTPCache, HTTPClient -from openml._api.config import get_settings +from openml._api.config import Settings from openml._api.resources import ( DatasetsV1, DatasetsV2, @@ -26,7 +26,7 @@ def __init__( def build_backend(version: str, *, strict: bool) -> APIBackend: - settings = get_settings() + settings = Settings.get() # Get config for v1 (lazy init from openml.config) v1_config = settings.get_api_config("v1") From edcd006b574a91e367d96e5c3718daf0edbc352e Mon Sep 17 00:00:00 2001 From: Simon Blanke Date: Fri, 30 Jan 2026 20:06:45 +0100 Subject: [PATCH 35/54] add explanations --- openml/_api/runtime/core.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 9207fc31d..a73105e91 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -28,7 +28,11 @@ def __init__( def build_backend(version: str, *, strict: bool) -> APIBackend: settings = Settings.get() - # Get config for v1 (lazy init from openml.config) + # Get config for v1. On first access, this triggers lazy initialization + # from openml.config, reading the user's actual API key, server URL, + # cache directory, and retry settings. This avoids circular imports + # (openml.config is imported inside the method, not at module load time) + # and ensures we use the user's configured values rather than hardcoded defaults. v1_config = settings.get_api_config("v1") http_cache = HTTPCache( @@ -54,7 +58,11 @@ def build_backend(version: str, *, strict: bool) -> APIBackend: if version == "v1": return v1 - # V2 support - will raise NotImplementedError if v2 config not available + # V2 support. Currently v2 is not yet available, + # so get_api_config("v2") raises NotImplementedError. When v2 becomes available, + # its config will be added to Settings._init_from_legacy_config(). + # In strict mode: propagate the error. + # In non-strict mode: silently fall back to v1 only. 
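An illustrative sketch (not part of this hunk) of the caller-visible behaviour these comments describe; it assumes build_backend is still importable from openml._api.runtime.core at this point in the series:

    from openml._api.runtime.core import build_backend

    build_backend("v1", strict=False)  # v1-only backend
    build_backend("v2", strict=False)  # no v2 config yet, so this silently returns the v1-only backend
    build_backend("v2", strict=True)   # raises NotImplementedError("API v2 is not yet available. ...")

The try/except that performs this check continues directly below.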
try: v2_config = settings.get_api_config("v2") except NotImplementedError: From cde0aaeb7657a03fe6547a9b252a2f13457fc7f0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 30 Jan 2026 19:10:42 +0000 Subject: [PATCH 36/54] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- openml/_api/config.py | 5 +---- openml/_api/runtime/core.py | 4 +--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 32dd8ecf5..76d30f113 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -75,10 +75,7 @@ def _init_from_legacy_config(self) -> None: import openml.config as legacy server_url = legacy.server - if "/api" in server_url: - server_base = server_url.rsplit("/api", 1)[0] + "/" - else: - server_base = server_url + server_base = server_url.rsplit("/api", 1)[0] + "/" if "/api" in server_url else server_url self.api_configs["v1"] = APIConfig( server=server_base, diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index a73105e91..22b3004a4 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -18,9 +18,7 @@ class APIBackend: - def __init__( - self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy - ): + def __init__(self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy): self.datasets = datasets self.tasks = tasks From aa1e5602b87caf59680434a17fe6cc6532f58419 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Sun, 1 Feb 2026 11:29:33 +0500 Subject: [PATCH 37/54] move to config: APIVersion, ResourceType --- openml/_api/config.py | 19 +++++++++++++++++++ openml/_api/resources/base/__init__.py | 4 +--- openml/_api/resources/base/base.py | 21 +-------------------- openml/_api/resources/base/resources.py | 3 ++- openml/_api/resources/base/versions.py | 3 ++- tests/test_api/test_versions.py | 2 +- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/openml/_api/config.py b/openml/_api/config.py index 76d30f113..3afbf224f 100644 --- a/openml/_api/config.py +++ b/openml/_api/config.py @@ -4,6 +4,25 @@ from enum import Enum +class APIVersion(str, Enum): + V1 = "v1" + V2 = "v2" + + +class ResourceType(str, Enum): + DATASET = "dataset" + TASK = "task" + TASK_TYPE = "task_type" + EVALUATION_MEASURE = "evaluation_measure" + ESTIMATION_PROCEDURE = "estimation_procedure" + EVALUATION = "evaluation" + FLOW = "flow" + STUDY = "study" + RUN = "run" + SETUP = "setup" + USER = "user" + + class RetryPolicy(str, Enum): HUMAN = "human" ROBOT = "robot" diff --git a/openml/_api/resources/base/__init__.py b/openml/_api/resources/base/__init__.py index bddc09b21..089729d09 100644 --- a/openml/_api/resources/base/__init__.py +++ b/openml/_api/resources/base/__init__.py @@ -1,14 +1,12 @@ -from openml._api.resources.base.base import APIVersion, ResourceAPI, ResourceType +from openml._api.resources.base.base import ResourceAPI from openml._api.resources.base.fallback import FallbackProxy from openml._api.resources.base.resources import DatasetsAPI, TasksAPI from openml._api.resources.base.versions import ResourceV1, ResourceV2 __all__ = [ - "APIVersion", "DatasetsAPI", "FallbackProxy", "ResourceAPI", - "ResourceType", "ResourceV1", "ResourceV2", "TasksAPI", diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index 38ceccbac..dbe3e95ea 100644 --- a/openml/_api/resources/base/base.py +++ 
b/openml/_api/resources/base/base.py @@ -1,7 +1,6 @@ from __future__ import annotations from abc import ABC, abstractmethod -from enum import Enum from typing import TYPE_CHECKING, NoReturn from openml.exceptions import OpenMLNotSupportedError @@ -11,25 +10,7 @@ from typing import Any from openml._api.clients import HTTPClient - - -class APIVersion(str, Enum): - V1 = "v1" - V2 = "v2" - - -class ResourceType(str, Enum): - DATASET = "dataset" - TASK = "task" - TASK_TYPE = "task_type" - EVALUATION_MEASURE = "evaluation_measure" - ESTIMATION_PROCEDURE = "estimation_procedure" - EVALUATION = "evaluation" - FLOW = "flow" - STUDY = "study" - RUN = "run" - SETUP = "setup" - USER = "user" + from openml._api.config import APIVersion, ResourceType class ResourceAPI(ABC): diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index 55cb95c0d..406bdfa50 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -3,7 +3,8 @@ from abc import abstractmethod from typing import TYPE_CHECKING -from openml._api.resources.base import ResourceAPI, ResourceType +from openml._api.config import ResourceType +from openml._api.resources.base import ResourceAPI if TYPE_CHECKING: from requests import Response diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 04b7617b1..990c3f791 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -5,7 +5,8 @@ import xmltodict -from openml._api.resources.base import APIVersion, ResourceAPI, ResourceType +from openml._api.config import APIVersion, ResourceType +from openml._api.resources.base import ResourceAPI from openml.exceptions import ( OpenMLNotAuthorizedError, OpenMLServerError, diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index d3b1cd45d..9eb4c7a91 100644 --- a/tests/test_api/test_versions.py +++ b/tests/test_api/test_versions.py @@ -1,7 +1,7 @@ import pytest from openml.testing import TestAPIBase from openml._api.resources.base.versions import ResourceV1 -from openml._api.resources.base.resources import ResourceType +from openml._api.config import ResourceType class TestResourceV1(TestAPIBase): From 06b8497eb552e2c880e93f19224a534bef37986b Mon Sep 17 00:00:00 2001 From: geetu040 Date: Sun, 1 Feb 2026 11:48:04 +0500 Subject: [PATCH 38/54] remove api_context entirely --- openml/__init__.py | 2 ++ openml/_api/__init__.py | 8 -------- openml/_api/runtime/core.py | 12 ------------ openml/_api/runtime/instance.py | 5 +++++ 4 files changed, 7 insertions(+), 20 deletions(-) create mode 100644 openml/_api/runtime/instance.py diff --git a/openml/__init__.py b/openml/__init__.py index ae5db261f..a7c95dc2e 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -33,6 +33,7 @@ utils, ) from .__version__ import __version__ +from ._api.runtime.instance import _backend from .datasets import OpenMLDataFeature, OpenMLDataset from .evaluations import OpenMLEvaluation from .flows import OpenMLFlow @@ -109,6 +110,7 @@ def populate_cache( "OpenMLTask", "__version__", "_api_calls", + "_backend", "config", "datasets", "evaluations", diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py index 881f40671..e69de29bb 100644 --- a/openml/_api/__init__.py +++ b/openml/_api/__init__.py @@ -1,8 +0,0 @@ -from openml._api.runtime.core import APIContext - - -def set_api_version(version: str, *, strict: bool = False) -> None: - api_context.set_version(version=version, strict=strict) - - 
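The removal of the module-level api_context continues on the next line. A minimal sketch of how callers reach the API layer once this patch lands, assuming the module-level instance added in openml/_api/runtime/instance.py further down (task id 31 is only an example value):

    import openml

    backend = openml._backend             # built once at import time via build_backend("v1", strict=False)
    task = backend.tasks.get(task_id=31)  # delegates to TasksV1.get()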
-api_context = APIContext() diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index 22b3004a4..d4ae9b688 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -91,15 +91,3 @@ def build_backend(version: str, *, strict: bool) -> APIBackend: datasets=FallbackProxy(DatasetsV2(v2_http_client), DatasetsV1(v1_http_client)), tasks=FallbackProxy(TasksV2(v2_http_client), TasksV1(v1_http_client)), ) - - -class APIContext: - def __init__(self) -> None: - self._backend = build_backend("v1", strict=False) - - def set_version(self, version: str, *, strict: bool = False) -> None: - self._backend = build_backend(version=version, strict=strict) - - @property - def backend(self) -> APIBackend: - return self._backend diff --git a/openml/_api/runtime/instance.py b/openml/_api/runtime/instance.py new file mode 100644 index 000000000..0d945b084 --- /dev/null +++ b/openml/_api/runtime/instance.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from openml._api.runtime.core import APIBackend, build_backend + +_backend: APIBackend = build_backend("v1", strict=False) From 384da91b80d91526826df3afda4ac2624562f6f7 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Sun, 1 Feb 2026 14:40:13 +0500 Subject: [PATCH 39/54] major refactor --- openml/_api/clients/__init__.py | 2 + openml/_api/clients/minio.py | 11 + openml/_api/resources/__init__.py | 36 ++- openml/_api/resources/base/__init__.py | 29 +- openml/_api/resources/base/resources.py | 49 ++-- openml/_api/resources/base/versions.py | 4 +- openml/_api/resources/dataset.py | 11 + openml/_api/resources/datasets.py | 20 -- openml/_api/resources/estimation_procedure.py | 11 + openml/_api/resources/evaluation.py | 11 + openml/_api/resources/evaluation_measure.py | 11 + openml/_api/resources/flow.py | 11 + openml/_api/resources/run.py | 11 + openml/_api/resources/setup.py | 11 + openml/_api/resources/study.py | 11 + openml/_api/resources/task.py | 11 + openml/_api/resources/tasks.py | 128 --------- openml/_api/runtime/core.py | 251 ++++++++++++------ openml/_api/runtime/instance.py | 4 +- tests/test_api/test_versions.py | 6 +- 20 files changed, 382 insertions(+), 257 deletions(-) create mode 100644 openml/_api/resources/dataset.py delete mode 100644 openml/_api/resources/datasets.py create mode 100644 openml/_api/resources/estimation_procedure.py create mode 100644 openml/_api/resources/evaluation.py create mode 100644 openml/_api/resources/evaluation_measure.py create mode 100644 openml/_api/resources/flow.py create mode 100644 openml/_api/resources/run.py create mode 100644 openml/_api/resources/setup.py create mode 100644 openml/_api/resources/study.py create mode 100644 openml/_api/resources/task.py delete mode 100644 openml/_api/resources/tasks.py diff --git a/openml/_api/clients/__init__.py b/openml/_api/clients/__init__.py index 8a5ff94e4..42f11fbcf 100644 --- a/openml/_api/clients/__init__.py +++ b/openml/_api/clients/__init__.py @@ -1,6 +1,8 @@ from .http import HTTPCache, HTTPClient +from .minio import MinIOClient __all__ = [ "HTTPCache", "HTTPClient", + "MinIOClient", ] diff --git a/openml/_api/clients/minio.py b/openml/_api/clients/minio.py index e69de29bb..2edc8269b 100644 --- a/openml/_api/clients/minio.py +++ b/openml/_api/clients/minio.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from pathlib import Path + +from openml.__version__ import __version__ + + +class MinIOClient: + def __init__(self, path: Path | None = None) -> None: + self.path = path + self.headers: dict[str, str] = {"user-agent": 
f"openml-python/{__version__}"} diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py index 6c0807e0f..b666c018b 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,5 +1,35 @@ from openml._api.resources.base.fallback import FallbackProxy -from openml._api.resources.datasets import DatasetsV1, DatasetsV2 -from openml._api.resources.tasks import TasksV1, TasksV2 +from openml._api.resources.dataset import DatasetV1API, DatasetV2API +from openml._api.resources.estimation_procedure import ( + EstimationProcedureV1API, + EstimationProcedureV2API, +) +from openml._api.resources.evaluation import EvaluationV1API, EvaluationV2API +from openml._api.resources.evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API +from openml._api.resources.flow import FlowV1API, FlowV2API +from openml._api.resources.run import RunV1API, RunV2API +from openml._api.resources.setup import SetupV1API, SetupV2API +from openml._api.resources.study import StudyV1API, StudyV2API +from openml._api.resources.task import TaskV1API, TaskV2API -__all__ = ["DatasetsV1", "DatasetsV2", "FallbackProxy", "TasksV1", "TasksV2"] +__all__ = [ + "DatasetV1API", + "DatasetV2API", + "EstimationProcedureV1API", + "EstimationProcedureV2API", + "EvaluationMeasureV1API", + "EvaluationMeasureV2API", + "EvaluationV1API", + "EvaluationV2API", + "FallbackProxy", + "FlowV1API", + "FlowV2API", + "RunV1API", + "RunV2API", + "SetupV1API", + "SetupV2API", + "StudyV1API", + "StudyV2API", + "TaskV1API", + "TaskV2API", +] diff --git a/openml/_api/resources/base/__init__.py b/openml/_api/resources/base/__init__.py index 089729d09..f222a0b87 100644 --- a/openml/_api/resources/base/__init__.py +++ b/openml/_api/resources/base/__init__.py @@ -1,13 +1,30 @@ from openml._api.resources.base.base import ResourceAPI from openml._api.resources.base.fallback import FallbackProxy -from openml._api.resources.base.resources import DatasetsAPI, TasksAPI -from openml._api.resources.base.versions import ResourceV1, ResourceV2 +from openml._api.resources.base.resources import ( + DatasetAPI, + EstimationProcedureAPI, + EvaluationAPI, + EvaluationMeasureAPI, + FlowAPI, + RunAPI, + SetupAPI, + StudyAPI, + TaskAPI, +) +from openml._api.resources.base.versions import ResourceV1API, ResourceV2API __all__ = [ - "DatasetsAPI", + "DatasetAPI", + "EstimationProcedureAPI", + "EvaluationAPI", + "EvaluationMeasureAPI", "FallbackProxy", + "FlowAPI", "ResourceAPI", - "ResourceV1", - "ResourceV2", - "TasksAPI", + "ResourceV1API", + "ResourceV2API", + "RunAPI", + "SetupAPI", + "StudyAPI", + "TaskAPI", ] diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index 406bdfa50..200278fc2 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -1,32 +1,49 @@ from __future__ import annotations -from abc import abstractmethod from typing import TYPE_CHECKING from openml._api.config import ResourceType from openml._api.resources.base import ResourceAPI if TYPE_CHECKING: - from requests import Response + from openml._api.clients import HTTPClient, MinIOClient - from openml.datasets.dataset import OpenMLDataset - from openml.tasks.task import OpenMLTask - -class DatasetsAPI(ResourceAPI): +class DatasetAPI(ResourceAPI): resource_type: ResourceType = ResourceType.DATASET - @abstractmethod - def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: ... 
+ def __init__(self, http: HTTPClient, minio: MinIOClient): + self._minio = minio + super().__init__(http) -class TasksAPI(ResourceAPI): +class TaskAPI(ResourceAPI): resource_type: ResourceType = ResourceType.TASK - @abstractmethod - def get( - self, - task_id: int, - *, - return_response: bool = False, - ) -> OpenMLTask | tuple[OpenMLTask, Response]: ... + +class EvaluationMeasureAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.EVALUATION_MEASURE + + +class EstimationProcedureAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.ESTIMATION_PROCEDURE + + +class EvaluationAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.EVALUATION + + +class FlowAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.FLOW + + +class StudyAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.STUDY + + +class RunAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.RUN + + +class SetupAPI(ResourceAPI): + resource_type: ResourceType = ResourceType.SETUP diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 990c3f791..88ae87a1c 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -14,7 +14,7 @@ ) -class ResourceV1(ResourceAPI): +class ResourceV1API(ResourceAPI): api_version: APIVersion = APIVersion.V1 def publish(self, path: str, files: Mapping[str, Any] | None) -> int: @@ -138,7 +138,7 @@ def _extract_id_from_upload(self, parsed: Mapping[str, Any]) -> int: raise ValueError("No ID found in upload response") -class ResourceV2(ResourceAPI): +class ResourceV2API(ResourceAPI): api_version: APIVersion = APIVersion.V2 def publish(self, path: str, files: Mapping[str, Any] | None) -> int: # noqa: ARG002 diff --git a/openml/_api/resources/dataset.py b/openml/_api/resources/dataset.py new file mode 100644 index 000000000..3ecad35da --- /dev/null +++ b/openml/_api/resources/dataset.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import DatasetAPI, ResourceV1API, ResourceV2API + + +class DatasetV1API(ResourceV1API, DatasetAPI): + pass + + +class DatasetV2API(ResourceV2API, DatasetAPI): + pass diff --git a/openml/_api/resources/datasets.py b/openml/_api/resources/datasets.py deleted file mode 100644 index f3a49a84f..000000000 --- a/openml/_api/resources/datasets.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -from openml._api.resources.base import DatasetsAPI, ResourceV1, ResourceV2 - -if TYPE_CHECKING: - from responses import Response - - from openml.datasets.dataset import OpenMLDataset - - -class DatasetsV1(ResourceV1, DatasetsAPI): - def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: - raise NotImplementedError - - -class DatasetsV2(ResourceV2, DatasetsAPI): - def get(self, dataset_id: int) -> OpenMLDataset | tuple[OpenMLDataset, Response]: - raise NotImplementedError diff --git a/openml/_api/resources/estimation_procedure.py b/openml/_api/resources/estimation_procedure.py new file mode 100644 index 000000000..d2e73cfa6 --- /dev/null +++ b/openml/_api/resources/estimation_procedure.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import EstimationProcedureAPI, ResourceV1API, ResourceV2API + + +class EstimationProcedureV1API(ResourceV1API, EstimationProcedureAPI): + pass + + +class EstimationProcedureV2API(ResourceV2API, EstimationProcedureAPI): + pass diff --git 
a/openml/_api/resources/evaluation.py b/openml/_api/resources/evaluation.py new file mode 100644 index 000000000..a0149e1e5 --- /dev/null +++ b/openml/_api/resources/evaluation.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import EvaluationAPI, ResourceV1API, ResourceV2API + + +class EvaluationV1API(ResourceV1API, EvaluationAPI): + pass + + +class EvaluationV2API(ResourceV2API, EvaluationAPI): + pass diff --git a/openml/_api/resources/evaluation_measure.py b/openml/_api/resources/evaluation_measure.py new file mode 100644 index 000000000..bd4318417 --- /dev/null +++ b/openml/_api/resources/evaluation_measure.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import EvaluationMeasureAPI, ResourceV1API, ResourceV2API + + +class EvaluationMeasureV1API(ResourceV1API, EvaluationMeasureAPI): + pass + + +class EvaluationMeasureV2API(ResourceV2API, EvaluationMeasureAPI): + pass diff --git a/openml/_api/resources/flow.py b/openml/_api/resources/flow.py new file mode 100644 index 000000000..3b62abd3f --- /dev/null +++ b/openml/_api/resources/flow.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import FlowAPI, ResourceV1API, ResourceV2API + + +class FlowV1API(ResourceV1API, FlowAPI): + pass + + +class FlowV2API(ResourceV2API, FlowAPI): + pass diff --git a/openml/_api/resources/run.py b/openml/_api/resources/run.py new file mode 100644 index 000000000..9698c59dd --- /dev/null +++ b/openml/_api/resources/run.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import ResourceV1API, ResourceV2API, RunAPI + + +class RunV1API(ResourceV1API, RunAPI): + pass + + +class RunV2API(ResourceV2API, RunAPI): + pass diff --git a/openml/_api/resources/setup.py b/openml/_api/resources/setup.py new file mode 100644 index 000000000..e948e1b38 --- /dev/null +++ b/openml/_api/resources/setup.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import ResourceV1API, ResourceV2API, SetupAPI + + +class SetupV1API(ResourceV1API, SetupAPI): + pass + + +class SetupV2API(ResourceV2API, SetupAPI): + pass diff --git a/openml/_api/resources/study.py b/openml/_api/resources/study.py new file mode 100644 index 000000000..8de5868d1 --- /dev/null +++ b/openml/_api/resources/study.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import ResourceV1API, ResourceV2API, StudyAPI + + +class StudyV1API(ResourceV1API, StudyAPI): + pass + + +class StudyV2API(ResourceV2API, StudyAPI): + pass diff --git a/openml/_api/resources/task.py b/openml/_api/resources/task.py new file mode 100644 index 000000000..a97d5f726 --- /dev/null +++ b/openml/_api/resources/task.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from openml._api.resources.base import ResourceV1API, ResourceV2API, TaskAPI + + +class TaskV1API(ResourceV1API, TaskAPI): + pass + + +class TaskV2API(ResourceV2API, TaskAPI): + pass diff --git a/openml/_api/resources/tasks.py b/openml/_api/resources/tasks.py deleted file mode 100644 index 8420f8e57..000000000 --- a/openml/_api/resources/tasks.py +++ /dev/null @@ -1,128 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -import xmltodict - -from openml._api.resources.base import ResourceV1, ResourceV2, TasksAPI -from openml.tasks.task import ( - OpenMLClassificationTask, - OpenMLClusteringTask, - OpenMLLearningCurveTask, - OpenMLRegressionTask, - OpenMLTask, - 
TaskType, -) - -if TYPE_CHECKING: - from requests import Response - - -class TasksV1(ResourceV1, TasksAPI): - def get( - self, - task_id: int, - *, - return_response: bool = False, - ) -> OpenMLTask | tuple[OpenMLTask, Response]: - path = f"task/{task_id}" - response = self._http.get(path, use_cache=True) - xml_content = response.text - task = self._create_task_from_xml(xml_content) - - if return_response: - return task, response - - return task - - def _create_task_from_xml(self, xml: str) -> OpenMLTask: - """Create a task given a xml string. - - Parameters - ---------- - xml : string - Task xml representation. - - Returns - ------- - OpenMLTask - """ - dic = xmltodict.parse(xml)["oml:task"] - estimation_parameters = {} - inputs = {} - # Due to the unordered structure we obtain, we first have to extract - # the possible keys of oml:input; dic["oml:input"] is a list of - # OrderedDicts - - # Check if there is a list of inputs - if isinstance(dic["oml:input"], list): - for input_ in dic["oml:input"]: - name = input_["@name"] - inputs[name] = input_ - # Single input case - elif isinstance(dic["oml:input"], dict): - name = dic["oml:input"]["@name"] - inputs[name] = dic["oml:input"] - - evaluation_measures = None - if "evaluation_measures" in inputs: - evaluation_measures = inputs["evaluation_measures"]["oml:evaluation_measures"][ - "oml:evaluation_measure" - ] - - task_type = TaskType(int(dic["oml:task_type_id"])) - common_kwargs = { - "task_id": dic["oml:task_id"], - "task_type": dic["oml:task_type"], - "task_type_id": task_type, - "data_set_id": inputs["source_data"]["oml:data_set"]["oml:data_set_id"], - "evaluation_measure": evaluation_measures, - } - # TODO: add OpenMLClusteringTask? - if task_type in ( - TaskType.SUPERVISED_CLASSIFICATION, - TaskType.SUPERVISED_REGRESSION, - TaskType.LEARNING_CURVE, - ): - # Convert some more parameters - for parameter in inputs["estimation_procedure"]["oml:estimation_procedure"][ - "oml:parameter" - ]: - name = parameter["@name"] - text = parameter.get("#text", "") - estimation_parameters[name] = text - - common_kwargs["estimation_procedure_type"] = inputs["estimation_procedure"][ - "oml:estimation_procedure" - ]["oml:type"] - common_kwargs["estimation_procedure_id"] = int( - inputs["estimation_procedure"]["oml:estimation_procedure"]["oml:id"] - ) - - common_kwargs["estimation_parameters"] = estimation_parameters - common_kwargs["target_name"] = inputs["source_data"]["oml:data_set"][ - "oml:target_feature" - ] - common_kwargs["data_splits_url"] = inputs["estimation_procedure"][ - "oml:estimation_procedure" - ]["oml:data_splits_url"] - - cls = { - TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask, - TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask, - TaskType.CLUSTERING: OpenMLClusteringTask, - TaskType.LEARNING_CURVE: OpenMLLearningCurveTask, - }.get(task_type) - if cls is None: - raise NotImplementedError(f"Task type {common_kwargs['task_type']} not supported.") - return cls(**common_kwargs) # type: ignore - - -class TasksV2(ResourceV2, TasksAPI): - def get( - self, - task_id: int, # noqa: ARG002 - *, - return_response: bool = False, # noqa: ARG002 - ) -> OpenMLTask | tuple[OpenMLTask, Response]: - self._not_supported(method="get") diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py index d4ae9b688..9c3ff70a5 100644 --- a/openml/_api/runtime/core.py +++ b/openml/_api/runtime/core.py @@ -3,91 +3,188 @@ from pathlib import Path from typing import TYPE_CHECKING -from openml._api.clients import HTTPCache, HTTPClient +from 
openml._api.clients import HTTPCache, HTTPClient, MinIOClient from openml._api.config import Settings from openml._api.resources import ( - DatasetsV1, - DatasetsV2, + DatasetV1API, + DatasetV2API, + EstimationProcedureV1API, + EstimationProcedureV2API, + EvaluationMeasureV1API, + EvaluationMeasureV2API, + EvaluationV1API, + EvaluationV2API, FallbackProxy, - TasksV1, - TasksV2, + FlowV1API, + FlowV2API, + RunV1API, + RunV2API, + SetupV1API, + SetupV2API, + StudyV1API, + StudyV2API, + TaskV1API, + TaskV2API, ) if TYPE_CHECKING: - from openml._api.resources.base import DatasetsAPI, TasksAPI - - -class APIBackend: - def __init__(self, *, datasets: DatasetsAPI | FallbackProxy, tasks: TasksAPI | FallbackProxy): - self.datasets = datasets - self.tasks = tasks - - -def build_backend(version: str, *, strict: bool) -> APIBackend: - settings = Settings.get() - - # Get config for v1. On first access, this triggers lazy initialization - # from openml.config, reading the user's actual API key, server URL, - # cache directory, and retry settings. This avoids circular imports - # (openml.config is imported inside the method, not at module load time) - # and ensures we use the user's configured values rather than hardcoded defaults. - v1_config = settings.get_api_config("v1") - - http_cache = HTTPCache( - path=Path(settings.cache.dir).expanduser(), - ttl=settings.cache.ttl, + from openml._api.resources.base import ( + DatasetAPI, + EstimationProcedureAPI, + EvaluationAPI, + EvaluationMeasureAPI, + FlowAPI, + RunAPI, + SetupAPI, + StudyAPI, + TaskAPI, ) - v1_http_client = HTTPClient( - server=v1_config.server, - base_url=v1_config.base_url, - api_key=v1_config.api_key, - timeout=v1_config.timeout, - retries=settings.connection.retries, - retry_policy=settings.connection.retry_policy, - cache=http_cache, - ) - v1 = APIBackend( - datasets=DatasetsV1(v1_http_client), - tasks=TasksV1(v1_http_client), - ) +class APIBackend: + def __init__( # noqa: PLR0913 + self, + *, + dataset: DatasetAPI | FallbackProxy, + task: TaskAPI | FallbackProxy, + evaluation_measure: EvaluationMeasureAPI | FallbackProxy, + estimation_procedure: EstimationProcedureAPI | FallbackProxy, + evaluation: EvaluationAPI | FallbackProxy, + flow: FlowAPI | FallbackProxy, + study: StudyAPI | FallbackProxy, + run: RunAPI | FallbackProxy, + setup: SetupAPI | FallbackProxy, + ): + self.dataset = dataset + self.task = task + self.evaluation_measure = evaluation_measure + self.estimation_procedure = estimation_procedure + self.evaluation = evaluation + self.flow = flow + self.study = study + self.run = run + self.setup = setup + + @classmethod + def build(cls, version: str, *, strict: bool) -> APIBackend: + settings = Settings.get() + + # Get config for v1. On first access, this triggers lazy initialization + # from openml.config, reading the user's actual API key, server URL, + # cache directory, and retry settings. This avoids circular imports + # (openml.config is imported inside the method, not at module load time) + # and ensures we use the user's configured values rather than hardcoded defaults. 
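A sketch of what that lazy initialisation yields, assuming the default openml.config.server of "https://www.openml.org/api/v1/xml":

    import openml.config
    from openml._api.config import Settings

    Settings.reset()                           # drop any cached singleton (handy in tests)
    cfg = Settings.get().get_api_config("v1")  # first access triggers _init_from_legacy_config()
    assert cfg.server == "https://www.openml.org/"  # legacy server URL split on "/api"
    assert cfg.base_url == "api/v1/xml/"
    assert cfg.api_key == openml.config.apikey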
+ v1_config = settings.get_api_config("v1") + + http_cache = HTTPCache( + path=Path(settings.cache.dir).expanduser(), + ttl=settings.cache.ttl, + ) + minio_client = MinIOClient( + path=Path(settings.cache.dir).expanduser(), + ) + + v1_http_client = HTTPClient( + server=v1_config.server, + base_url=v1_config.base_url, + api_key=v1_config.api_key, + timeout=v1_config.timeout, + retries=settings.connection.retries, + retry_policy=settings.connection.retry_policy, + cache=http_cache, + ) + v1_dataset = DatasetV1API(v1_http_client, minio_client) + v1_task = TaskV1API(v1_http_client) + v1_evaluation_measure = EvaluationMeasureV1API(v1_http_client) + v1_estimation_procedure = EstimationProcedureV1API(v1_http_client) + v1_evaluation = EvaluationV1API(v1_http_client) + v1_flow = FlowV1API(v1_http_client) + v1_study = StudyV1API(v1_http_client) + v1_run = RunV1API(v1_http_client) + v1_setup = SetupV1API(v1_http_client) + + v1 = cls( + dataset=v1_dataset, + task=v1_task, + evaluation_measure=v1_evaluation_measure, + estimation_procedure=v1_estimation_procedure, + evaluation=v1_evaluation, + flow=v1_flow, + study=v1_study, + run=v1_run, + setup=v1_setup, + ) + + if version == "v1": + return v1 + + # V2 support. Currently v2 is not yet available, + # so get_api_config("v2") raises NotImplementedError. When v2 becomes available, + # its config will be added to Settings._init_from_legacy_config(). + # In strict mode: propagate the error. + # In non-strict mode: silently fall back to v1 only. + try: + v2_config = settings.get_api_config("v2") + except NotImplementedError: + if strict: + raise + # Non-strict mode: fall back to v1 only + return v1 + + v2_http_client = HTTPClient( + server=v2_config.server, + base_url=v2_config.base_url, + api_key=v2_config.api_key, + timeout=v2_config.timeout, + retries=settings.connection.retries, + retry_policy=settings.connection.retry_policy, + cache=http_cache, + ) + v2_dataset = DatasetV2API(v2_http_client, minio_client) + v2_task = TaskV2API(v2_http_client) + v2_evaluation_measure = EvaluationMeasureV2API(v2_http_client) + v2_estimation_procedure = EstimationProcedureV2API(v2_http_client) + v2_evaluation = EvaluationV2API(v2_http_client) + v2_flow = FlowV2API(v2_http_client) + v2_study = StudyV2API(v2_http_client) + v2_run = RunV2API(v2_http_client) + v2_setup = SetupV2API(v2_http_client) + + v2 = cls( + dataset=v2_dataset, + task=v2_task, + evaluation_measure=v2_evaluation_measure, + estimation_procedure=v2_estimation_procedure, + evaluation=v2_evaluation, + flow=v2_flow, + study=v2_study, + run=v2_run, + setup=v2_setup, + ) - if version == "v1": - return v1 - - # V2 support. Currently v2 is not yet available, - # so get_api_config("v2") raises NotImplementedError. When v2 becomes available, - # its config will be added to Settings._init_from_legacy_config(). - # In strict mode: propagate the error. - # In non-strict mode: silently fall back to v1 only. 
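Once built, the backend exposes one attribute per resource type; a rough sketch, assuming the attribute names set in APIBackend.__init__ above (the try/except for the v2 lookup continues below):

    from openml._api.runtime.core import APIBackend

    backend = APIBackend.build(version="v1", strict=False)  # the same call the instance module makes
    backend.task                # TaskV1API
    backend.dataset             # DatasetV1API, holding both the HTTP and MinIO clients
    backend.evaluation_measure  # EvaluationMeasureV1API, and so on for the remaining resources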
- try: - v2_config = settings.get_api_config("v2") - except NotImplementedError: if strict: - raise - # Non-strict mode: fall back to v1 only - return v1 - - v2_http_client = HTTPClient( - server=v2_config.server, - base_url=v2_config.base_url, - api_key=v2_config.api_key, - timeout=v2_config.timeout, - retries=settings.connection.retries, - retry_policy=settings.connection.retry_policy, - cache=http_cache, - ) - - v2 = APIBackend( - datasets=DatasetsV2(v2_http_client), - tasks=TasksV2(v2_http_client), - ) - - if strict: - return v2 - - return APIBackend( - datasets=FallbackProxy(DatasetsV2(v2_http_client), DatasetsV1(v1_http_client)), - tasks=FallbackProxy(TasksV2(v2_http_client), TasksV1(v1_http_client)), - ) + return v2 + + fallback_dataset = FallbackProxy(v1_dataset, v2_dataset) + fallback_task = FallbackProxy(v1_task, v2_task) + fallback_evaluation_measure = FallbackProxy(v1_evaluation_measure, v2_evaluation_measure) + fallback_estimation_procedure = FallbackProxy( + v1_estimation_procedure, v2_estimation_procedure + ) + fallback_evaluation = FallbackProxy(v1_evaluation, v2_evaluation) + fallback_flow = FallbackProxy(v1_flow, v2_flow) + fallback_study = FallbackProxy(v1_study, v2_study) + fallback_run = FallbackProxy(v1_run, v2_run) + fallback_setup = FallbackProxy(v1_setup, v2_setup) + + return cls( + dataset=fallback_dataset, + task=fallback_task, + evaluation_measure=fallback_evaluation_measure, + estimation_procedure=fallback_estimation_procedure, + evaluation=fallback_evaluation, + flow=fallback_flow, + study=fallback_study, + run=fallback_run, + setup=fallback_setup, + ) diff --git a/openml/_api/runtime/instance.py b/openml/_api/runtime/instance.py index 0d945b084..633d3f372 100644 --- a/openml/_api/runtime/instance.py +++ b/openml/_api/runtime/instance.py @@ -1,5 +1,5 @@ from __future__ import annotations -from openml._api.runtime.core import APIBackend, build_backend +from openml._api.runtime.core import APIBackend -_backend: APIBackend = build_backend("v1", strict=False) +_backend: APIBackend = APIBackend.build(version="v1", strict=False) diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index 9eb4c7a91..2203ab6da 100644 --- a/tests/test_api/test_versions.py +++ b/tests/test_api/test_versions.py @@ -1,13 +1,13 @@ import pytest from openml.testing import TestAPIBase -from openml._api.resources.base.versions import ResourceV1 +from openml._api.resources.base.versions import ResourceV1API from openml._api.config import ResourceType -class TestResourceV1(TestAPIBase): +class TestResourceV1API(TestAPIBase): def setUp(self): super().setUp() - self.resource = ResourceV1(self.http_client) + self.resource = ResourceV1API(self.http_client) self.resource.resource_type = ResourceType.TASK @pytest.mark.uses_test_server() From 187813839c57ddb0d12b702f371fe7d08220c963 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 10:37:59 +0500 Subject: [PATCH 40/54] more refactoring with setup/ --- openml/__init__.py | 2 - openml/_api/clients/http.py | 2 +- openml/_api/config.py | 114 ------------- openml/_api/resources/__init__.py | 2 + openml/_api/resources/_registry.py | 48 ++++++ openml/_api/resources/base/base.py | 7 +- openml/_api/resources/base/enums.py | 27 +++ openml/_api/resources/base/resources.py | 11 +- openml/_api/resources/base/versions.py | 2 +- openml/_api/runtime/core.py | 190 --------------------- openml/_api/runtime/instance.py | 5 - openml/_api/{runtime => setup}/__init__.py | 0 openml/_api/setup/builder.py | 71 ++++++++ 
openml/_api/setup/config.py | 62 +++++++ openml/_api/setup/utils.py | 49 ++++++ openml/testing.py | 2 +- tests/test_api/test_versions.py | 2 +- 17 files changed, 268 insertions(+), 328 deletions(-) delete mode 100644 openml/_api/config.py create mode 100644 openml/_api/resources/_registry.py create mode 100644 openml/_api/resources/base/enums.py delete mode 100644 openml/_api/runtime/core.py delete mode 100644 openml/_api/runtime/instance.py rename openml/_api/{runtime => setup}/__init__.py (100%) create mode 100644 openml/_api/setup/builder.py create mode 100644 openml/_api/setup/config.py create mode 100644 openml/_api/setup/utils.py diff --git a/openml/__init__.py b/openml/__init__.py index a7c95dc2e..ae5db261f 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -33,7 +33,6 @@ utils, ) from .__version__ import __version__ -from ._api.runtime.instance import _backend from .datasets import OpenMLDataFeature, OpenMLDataset from .evaluations import OpenMLEvaluation from .flows import OpenMLFlow @@ -110,7 +109,6 @@ def populate_cache( "OpenMLTask", "__version__", "_api_calls", - "_backend", "config", "datasets", "evaluations", diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index dfcdf5a8a..f700c108a 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -16,7 +16,7 @@ from requests import Response from openml.__version__ import __version__ -from openml._api.config import RetryPolicy +from openml._api.resources.base.enums import RetryPolicy from openml.exceptions import ( OpenMLNotAuthorizedError, OpenMLServerError, diff --git a/openml/_api/config.py b/openml/_api/config.py deleted file mode 100644 index 3afbf224f..000000000 --- a/openml/_api/config.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from enum import Enum - - -class APIVersion(str, Enum): - V1 = "v1" - V2 = "v2" - - -class ResourceType(str, Enum): - DATASET = "dataset" - TASK = "task" - TASK_TYPE = "task_type" - EVALUATION_MEASURE = "evaluation_measure" - ESTIMATION_PROCEDURE = "estimation_procedure" - EVALUATION = "evaluation" - FLOW = "flow" - STUDY = "study" - RUN = "run" - SETUP = "setup" - USER = "user" - - -class RetryPolicy(str, Enum): - HUMAN = "human" - ROBOT = "robot" - - -@dataclass -class APIConfig: - server: str - base_url: str - api_key: str - timeout: int = 10 # seconds - - -@dataclass -class ConnectionConfig: - retries: int = 3 - retry_policy: RetryPolicy = RetryPolicy.HUMAN - - -@dataclass -class CacheConfig: - dir: str = "~/.openml/cache" - ttl: int = 60 * 60 * 24 * 7 # one week - - -class Settings: - """Settings container that reads from openml.config on access.""" - - _instance: Settings | None = None - - def __init__(self) -> None: - self.api_configs: dict[str, APIConfig] = {} - self.connection = ConnectionConfig() - self.cache = CacheConfig() - self._initialized = False - - @classmethod - def get(cls) -> Settings: - """Get settings singleton, creating on first access.""" - if cls._instance is None: - cls._instance = cls() - return cls._instance - - @classmethod - def reset(cls) -> None: - """Reset the settings singleton. Useful for testing.""" - cls._instance = None - - def get_api_config(self, version: str) -> APIConfig: - """Get API config for a version, with lazy initialization from openml.config.""" - if not self._initialized: - self._init_from_legacy_config() - if version not in self.api_configs: - raise NotImplementedError( - f"API {version} is not yet available. 
" - f"Supported versions: {list(self.api_configs.keys())}" - ) - return self.api_configs[version] - - def _init_from_legacy_config(self) -> None: - """Lazy init from openml.config to avoid circular imports.""" - if self._initialized: - return - - # Import here (not at module level) to avoid circular imports. - # We read from openml.config to integrate with the existing config system - # where users set their API key, server, cache directory, etc. - # This avoids duplicating those settings with hardcoded values. - import openml.config as legacy - - server_url = legacy.server - server_base = server_url.rsplit("/api", 1)[0] + "/" if "/api" in server_url else server_url - - self.api_configs["v1"] = APIConfig( - server=server_base, - base_url="api/v1/xml/", - api_key=legacy.apikey, - ) - - # Sync connection- and cache- settings from legacy config - self.connection = ConnectionConfig( - retries=legacy.connection_n_retries, - retry_policy=RetryPolicy(legacy.retry_policy), - ) - self.cache = CacheConfig( - dir=str(legacy._root_cache_directory), - ) - - self._initialized = True diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py index b666c018b..a3dc63798 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,3 +1,4 @@ +from openml._api.resources._registry import API_REGISTRY from openml._api.resources.base.fallback import FallbackProxy from openml._api.resources.dataset import DatasetV1API, DatasetV2API from openml._api.resources.estimation_procedure import ( @@ -13,6 +14,7 @@ from openml._api.resources.task import TaskV1API, TaskV2API __all__ = [ + "API_REGISTRY", "DatasetV1API", "DatasetV2API", "EstimationProcedureV1API", diff --git a/openml/_api/resources/_registry.py b/openml/_api/resources/_registry.py new file mode 100644 index 000000000..e8746f481 --- /dev/null +++ b/openml/_api/resources/_registry.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from openml._api.resources.base.enums import APIVersion, ResourceType +from openml._api.resources.dataset import DatasetV1API, DatasetV2API +from openml._api.resources.estimation_procedure import ( + EstimationProcedureV1API, + EstimationProcedureV2API, +) +from openml._api.resources.evaluation import EvaluationV1API, EvaluationV2API +from openml._api.resources.evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API +from openml._api.resources.flow import FlowV1API, FlowV2API +from openml._api.resources.run import RunV1API, RunV2API +from openml._api.resources.setup import SetupV1API, SetupV2API +from openml._api.resources.study import StudyV1API, StudyV2API +from openml._api.resources.task import TaskV1API, TaskV2API + +if TYPE_CHECKING: + from openml._api.resources.base import ResourceAPI + +API_REGISTRY: dict[ + APIVersion, + dict[ResourceType, type[ResourceAPI]], +] = { + APIVersion.V1: { + ResourceType.DATASET: DatasetV1API, + ResourceType.TASK: TaskV1API, + ResourceType.EVALUATION_MEASURE: EvaluationMeasureV1API, + ResourceType.ESTIMATION_PROCEDURE: EstimationProcedureV1API, + ResourceType.EVALUATION: EvaluationV1API, + ResourceType.FLOW: FlowV1API, + ResourceType.STUDY: StudyV1API, + ResourceType.RUN: RunV1API, + ResourceType.SETUP: SetupV1API, + }, + APIVersion.V2: { + ResourceType.DATASET: DatasetV2API, + ResourceType.TASK: TaskV2API, + ResourceType.EVALUATION_MEASURE: EvaluationMeasureV2API, + ResourceType.ESTIMATION_PROCEDURE: EstimationProcedureV2API, + ResourceType.EVALUATION: EvaluationV2API, + 
ResourceType.FLOW: FlowV2API, + ResourceType.STUDY: StudyV2API, + ResourceType.RUN: RunV2API, + ResourceType.SETUP: SetupV2API, + }, +} diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index dbe3e95ea..6a47f83f4 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -9,16 +9,17 @@ from collections.abc import Mapping from typing import Any - from openml._api.clients import HTTPClient - from openml._api.config import APIVersion, ResourceType + from openml._api.clients import HTTPClient, MinIOClient + from openml._api.resources.base.enums import APIVersion, ResourceType class ResourceAPI(ABC): api_version: APIVersion resource_type: ResourceType - def __init__(self, http: HTTPClient): + def __init__(self, http: HTTPClient, minio: MinIOClient | None = None): self._http = http + self._minio = minio @abstractmethod def delete(self, resource_id: int) -> bool: ... diff --git a/openml/_api/resources/base/enums.py b/openml/_api/resources/base/enums.py new file mode 100644 index 000000000..13201b3ec --- /dev/null +++ b/openml/_api/resources/base/enums.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from enum import Enum + + +class APIVersion(str, Enum): + V1 = "v1" + V2 = "v2" + + +class ResourceType(str, Enum): + DATASET = "dataset" + TASK = "task" + TASK_TYPE = "task_type" + EVALUATION_MEASURE = "evaluation_measure" + ESTIMATION_PROCEDURE = "estimation_procedure" + EVALUATION = "evaluation" + FLOW = "flow" + STUDY = "study" + RUN = "run" + SETUP = "setup" + USER = "user" + + +class RetryPolicy(str, Enum): + HUMAN = "human" + ROBOT = "robot" diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index 200278fc2..270472029 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -1,21 +1,12 @@ from __future__ import annotations -from typing import TYPE_CHECKING - -from openml._api.config import ResourceType from openml._api.resources.base import ResourceAPI - -if TYPE_CHECKING: - from openml._api.clients import HTTPClient, MinIOClient +from openml._api.resources.base.enums import ResourceType class DatasetAPI(ResourceAPI): resource_type: ResourceType = ResourceType.DATASET - def __init__(self, http: HTTPClient, minio: MinIOClient): - self._minio = minio - super().__init__(http) - class TaskAPI(ResourceAPI): resource_type: ResourceType = ResourceType.TASK diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index 88ae87a1c..f8b21a469 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -5,8 +5,8 @@ import xmltodict -from openml._api.config import APIVersion, ResourceType from openml._api.resources.base import ResourceAPI +from openml._api.resources.base.enums import APIVersion, ResourceType from openml.exceptions import ( OpenMLNotAuthorizedError, OpenMLServerError, diff --git a/openml/_api/runtime/core.py b/openml/_api/runtime/core.py deleted file mode 100644 index 9c3ff70a5..000000000 --- a/openml/_api/runtime/core.py +++ /dev/null @@ -1,190 +0,0 @@ -from __future__ import annotations - -from pathlib import Path -from typing import TYPE_CHECKING - -from openml._api.clients import HTTPCache, HTTPClient, MinIOClient -from openml._api.config import Settings -from openml._api.resources import ( - DatasetV1API, - DatasetV2API, - EstimationProcedureV1API, - EstimationProcedureV2API, - EvaluationMeasureV1API, - EvaluationMeasureV2API, - EvaluationV1API, - 
EvaluationV2API, - FallbackProxy, - FlowV1API, - FlowV2API, - RunV1API, - RunV2API, - SetupV1API, - SetupV2API, - StudyV1API, - StudyV2API, - TaskV1API, - TaskV2API, -) - -if TYPE_CHECKING: - from openml._api.resources.base import ( - DatasetAPI, - EstimationProcedureAPI, - EvaluationAPI, - EvaluationMeasureAPI, - FlowAPI, - RunAPI, - SetupAPI, - StudyAPI, - TaskAPI, - ) - - -class APIBackend: - def __init__( # noqa: PLR0913 - self, - *, - dataset: DatasetAPI | FallbackProxy, - task: TaskAPI | FallbackProxy, - evaluation_measure: EvaluationMeasureAPI | FallbackProxy, - estimation_procedure: EstimationProcedureAPI | FallbackProxy, - evaluation: EvaluationAPI | FallbackProxy, - flow: FlowAPI | FallbackProxy, - study: StudyAPI | FallbackProxy, - run: RunAPI | FallbackProxy, - setup: SetupAPI | FallbackProxy, - ): - self.dataset = dataset - self.task = task - self.evaluation_measure = evaluation_measure - self.estimation_procedure = estimation_procedure - self.evaluation = evaluation - self.flow = flow - self.study = study - self.run = run - self.setup = setup - - @classmethod - def build(cls, version: str, *, strict: bool) -> APIBackend: - settings = Settings.get() - - # Get config for v1. On first access, this triggers lazy initialization - # from openml.config, reading the user's actual API key, server URL, - # cache directory, and retry settings. This avoids circular imports - # (openml.config is imported inside the method, not at module load time) - # and ensures we use the user's configured values rather than hardcoded defaults. - v1_config = settings.get_api_config("v1") - - http_cache = HTTPCache( - path=Path(settings.cache.dir).expanduser(), - ttl=settings.cache.ttl, - ) - minio_client = MinIOClient( - path=Path(settings.cache.dir).expanduser(), - ) - - v1_http_client = HTTPClient( - server=v1_config.server, - base_url=v1_config.base_url, - api_key=v1_config.api_key, - timeout=v1_config.timeout, - retries=settings.connection.retries, - retry_policy=settings.connection.retry_policy, - cache=http_cache, - ) - v1_dataset = DatasetV1API(v1_http_client, minio_client) - v1_task = TaskV1API(v1_http_client) - v1_evaluation_measure = EvaluationMeasureV1API(v1_http_client) - v1_estimation_procedure = EstimationProcedureV1API(v1_http_client) - v1_evaluation = EvaluationV1API(v1_http_client) - v1_flow = FlowV1API(v1_http_client) - v1_study = StudyV1API(v1_http_client) - v1_run = RunV1API(v1_http_client) - v1_setup = SetupV1API(v1_http_client) - - v1 = cls( - dataset=v1_dataset, - task=v1_task, - evaluation_measure=v1_evaluation_measure, - estimation_procedure=v1_estimation_procedure, - evaluation=v1_evaluation, - flow=v1_flow, - study=v1_study, - run=v1_run, - setup=v1_setup, - ) - - if version == "v1": - return v1 - - # V2 support. Currently v2 is not yet available, - # so get_api_config("v2") raises NotImplementedError. When v2 becomes available, - # its config will be added to Settings._init_from_legacy_config(). - # In strict mode: propagate the error. - # In non-strict mode: silently fall back to v1 only. 
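The deleted branching continues below; its replacement lives in openml/_api/setup/, added further down in this patch. A rough usage sketch based on the Config and APIBackendBuilder shown there:

    from openml._api.resources.base.enums import APIVersion
    from openml._api.setup.builder import APIBackendBuilder
    from openml._api.setup.config import Config

    # v1 only, no fallback:
    backend = APIBackendBuilder.build(Config(api_version=APIVersion.V1))

    # v2 first, with an explicit v1 fallback:
    backend = APIBackendBuilder.build(
        Config(api_version=APIVersion.V2, fallback_api_version=APIVersion.V1)
    )
    backend.task  # a FallbackProxy wrapping TaskV2API and TaskV1API; attribute names come from ResourceType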
- try: - v2_config = settings.get_api_config("v2") - except NotImplementedError: - if strict: - raise - # Non-strict mode: fall back to v1 only - return v1 - - v2_http_client = HTTPClient( - server=v2_config.server, - base_url=v2_config.base_url, - api_key=v2_config.api_key, - timeout=v2_config.timeout, - retries=settings.connection.retries, - retry_policy=settings.connection.retry_policy, - cache=http_cache, - ) - v2_dataset = DatasetV2API(v2_http_client, minio_client) - v2_task = TaskV2API(v2_http_client) - v2_evaluation_measure = EvaluationMeasureV2API(v2_http_client) - v2_estimation_procedure = EstimationProcedureV2API(v2_http_client) - v2_evaluation = EvaluationV2API(v2_http_client) - v2_flow = FlowV2API(v2_http_client) - v2_study = StudyV2API(v2_http_client) - v2_run = RunV2API(v2_http_client) - v2_setup = SetupV2API(v2_http_client) - - v2 = cls( - dataset=v2_dataset, - task=v2_task, - evaluation_measure=v2_evaluation_measure, - estimation_procedure=v2_estimation_procedure, - evaluation=v2_evaluation, - flow=v2_flow, - study=v2_study, - run=v2_run, - setup=v2_setup, - ) - - if strict: - return v2 - - fallback_dataset = FallbackProxy(v1_dataset, v2_dataset) - fallback_task = FallbackProxy(v1_task, v2_task) - fallback_evaluation_measure = FallbackProxy(v1_evaluation_measure, v2_evaluation_measure) - fallback_estimation_procedure = FallbackProxy( - v1_estimation_procedure, v2_estimation_procedure - ) - fallback_evaluation = FallbackProxy(v1_evaluation, v2_evaluation) - fallback_flow = FallbackProxy(v1_flow, v2_flow) - fallback_study = FallbackProxy(v1_study, v2_study) - fallback_run = FallbackProxy(v1_run, v2_run) - fallback_setup = FallbackProxy(v1_setup, v2_setup) - - return cls( - dataset=fallback_dataset, - task=fallback_task, - evaluation_measure=fallback_evaluation_measure, - estimation_procedure=fallback_estimation_procedure, - evaluation=fallback_evaluation, - flow=fallback_flow, - study=fallback_study, - run=fallback_run, - setup=fallback_setup, - ) diff --git a/openml/_api/runtime/instance.py b/openml/_api/runtime/instance.py deleted file mode 100644 index 633d3f372..000000000 --- a/openml/_api/runtime/instance.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import annotations - -from openml._api.runtime.core import APIBackend - -_backend: APIBackend = APIBackend.build(version="v1", strict=False) diff --git a/openml/_api/runtime/__init__.py b/openml/_api/setup/__init__.py similarity index 100% rename from openml/_api/runtime/__init__.py rename to openml/_api/setup/__init__.py diff --git a/openml/_api/setup/builder.py b/openml/_api/setup/builder.py new file mode 100644 index 000000000..4f4b843d7 --- /dev/null +++ b/openml/_api/setup/builder.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from collections.abc import Mapping +from pathlib import Path +from typing import TYPE_CHECKING + +from openml._api.clients import HTTPCache, HTTPClient, MinIOClient +from openml._api.resources import API_REGISTRY, FallbackProxy + +if TYPE_CHECKING: + from openml._api.resources.base import ResourceAPI + from openml._api.resources.base.enums import ResourceType + from openml._api.setup.config import Config + + +class APIBackendBuilder: + def __init__( + self, + resource_apis: Mapping[ResourceType, ResourceAPI | FallbackProxy], + ): + for resource_type, resource_api in resource_apis.items(): + setattr(self, resource_type.value, resource_api) + + @classmethod + def build(cls, config: Config) -> APIBackendBuilder: + cache_dir = Path(config.cache.dir).expanduser() + + http_cache = 
HTTPCache(path=cache_dir, ttl=config.cache.ttl) + minio_client = MinIOClient(path=cache_dir) + + primary_api_config = config.api_configs[config.api_version] + primary_http_client = HTTPClient( + server=primary_api_config.server, + base_url=primary_api_config.base_url, + api_key=primary_api_config.api_key, + timeout=config.connection.timeout, + retries=config.connection.retries, + retry_policy=config.connection.retry_policy, + cache=http_cache, + ) + + resource_apis: dict[ResourceType, ResourceAPI] = {} + for resource_type, resource_api_cls in API_REGISTRY[config.api_version].items(): + resource_apis[resource_type] = resource_api_cls(primary_http_client, minio_client) + + if config.fallback_api_version is None: + return cls(resource_apis) + + fallback_api_config = config.api_configs[config.fallback_api_version] + fallback_http_client = HTTPClient( + server=fallback_api_config.server, + base_url=fallback_api_config.base_url, + api_key=fallback_api_config.api_key, + timeout=config.connection.timeout, + retries=config.connection.retries, + retry_policy=config.connection.retry_policy, + cache=http_cache, + ) + + fallback_resource_apis: dict[ResourceType, ResourceAPI] = {} + for resource_type, resource_api_cls in API_REGISTRY[config.fallback_api_version].items(): + fallback_resource_apis[resource_type] = resource_api_cls( + fallback_http_client, minio_client + ) + + merged: dict[ResourceType, FallbackProxy] = { + name: FallbackProxy(resource_apis[name], fallback_resource_apis[name]) + for name in resource_apis + } + + return cls(merged) diff --git a/openml/_api/setup/config.py b/openml/_api/setup/config.py new file mode 100644 index 000000000..0f783a23e --- /dev/null +++ b/openml/_api/setup/config.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from dataclasses import dataclass, field + +from openml._api.resources.base.enums import APIVersion, RetryPolicy +from openml._api.setup.utils import _resolve_default_cache_dir + + +@dataclass +class APIConfig: + server: str + base_url: str + api_key: str + + +@dataclass +class ConnectionConfig: + retries: int + retry_policy: RetryPolicy + timeout: int + + +@dataclass +class CacheConfig: + dir: str + ttl: int + + +@dataclass +class Config: + api_version: APIVersion = APIVersion.V1 + fallback_api_version: APIVersion | None = None + + api_configs: dict[APIVersion, APIConfig] = field( + default_factory=lambda: { + APIVersion.V1: APIConfig( + server="https://www.openml.org/", + base_url="api/v1/xml/", + api_key="", + ), + APIVersion.V2: APIConfig( + server="http://localhost:8002/", + base_url="", + api_key="", + ), + } + ) + + connection: ConnectionConfig = field( + default_factory=lambda: ConnectionConfig( + retries=5, + retry_policy=RetryPolicy.HUMAN, + timeout=10, + ) + ) + + cache: CacheConfig = field( + default_factory=lambda: CacheConfig( + dir=str(_resolve_default_cache_dir()), + ttl=60 * 60 * 24 * 7, + ) + ) diff --git a/openml/_api/setup/utils.py b/openml/_api/setup/utils.py new file mode 100644 index 000000000..ddcf5b41c --- /dev/null +++ b/openml/_api/setup/utils.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +import logging +import os +import platform +from pathlib import Path + +openml_logger = logging.getLogger("openml") + +# Default values (see also https://github.com/openml/OpenML/wiki/Client-API-Standards) +_user_path = Path("~").expanduser().absolute() + + +def _resolve_default_cache_dir() -> Path: + user_defined_cache_dir = os.environ.get("OPENML_CACHE_DIR") + if user_defined_cache_dir is not None: + return 
Path(user_defined_cache_dir) + + if platform.system().lower() != "linux": + return _user_path / ".openml" + + xdg_cache_home = os.environ.get("XDG_CACHE_HOME") + if xdg_cache_home is None: + return Path("~", ".cache", "openml") + + # This is the proper XDG_CACHE_HOME directory, but + # we unfortunately had a problem where we used XDG_CACHE_HOME/org, + # we check heuristically if this old directory still exists and issue + # a warning if it does. There's too much data to move to do this for the user. + + # The new cache directory exists + cache_dir = Path(xdg_cache_home) / "openml" + if cache_dir.exists(): + return cache_dir + + # The old cache directory *does not* exist + heuristic_dir_for_backwards_compat = Path(xdg_cache_home) / "org" / "openml" + if not heuristic_dir_for_backwards_compat.exists(): + return cache_dir + + root_dir_to_delete = Path(xdg_cache_home) / "org" + openml_logger.warning( + "An old cache directory was found at '%s'. This directory is no longer used by " + "OpenML-Python. To silence this warning you would need to delete the old cache " + "directory. The cached files will then be located in '%s'.", + root_dir_to_delete, + cache_dir, + ) + return Path(xdg_cache_home) diff --git a/openml/testing.py b/openml/testing.py index b0aaac9be..18e03fb86 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -17,7 +17,7 @@ import openml from openml._api.clients import HTTPCache, HTTPClient -from openml._api.config import RetryPolicy +from openml._api.resources.base.enums import RetryPolicy from openml.exceptions import OpenMLServerException from openml.tasks import TaskType diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index 2203ab6da..fd41feb2a 100644 --- a/tests/test_api/test_versions.py +++ b/tests/test_api/test_versions.py @@ -1,7 +1,7 @@ import pytest from openml.testing import TestAPIBase from openml._api.resources.base.versions import ResourceV1API -from openml._api.config import ResourceType +from openml._api.resources.base.enums import ResourceType class TestResourceV1API(TestAPIBase): From dc26e016e02b4ed23961f148234398582b152e6f Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 10:40:03 +0500 Subject: [PATCH 41/54] implement APIBackend as controller --- openml/__init__.py | 2 ++ openml/_api/setup/_instance.py | 5 +++ openml/_api/setup/backend.py | 62 ++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+) create mode 100644 openml/_api/setup/_instance.py create mode 100644 openml/_api/setup/backend.py diff --git a/openml/__init__.py b/openml/__init__.py index ae5db261f..fdf3b90e4 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -33,6 +33,7 @@ utils, ) from .__version__ import __version__ +from ._api.setup._instance import _backend from .datasets import OpenMLDataFeature, OpenMLDataset from .evaluations import OpenMLEvaluation from .flows import OpenMLFlow @@ -109,6 +110,7 @@ def populate_cache( "OpenMLTask", "__version__", "_api_calls", + "_backend", "config", "datasets", "evaluations", diff --git a/openml/_api/setup/_instance.py b/openml/_api/setup/_instance.py new file mode 100644 index 000000000..2d9818a0d --- /dev/null +++ b/openml/_api/setup/_instance.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from openml._api.setup.backend import APIBackend + +_backend = APIBackend.get_instance() diff --git a/openml/_api/setup/backend.py b/openml/_api/setup/backend.py new file mode 100644 index 000000000..7c300e143 --- /dev/null +++ b/openml/_api/setup/backend.py @@ -0,0 +1,62 @@ +from 
__future__ import annotations + +from copy import deepcopy +from typing import Any + +from openml._api.setup.builder import APIBackendBuilder +from openml._api.setup.config import Config + + +class APIBackend: + _instance: APIBackend | None = None + + def __init__(self, config: Config | None = None): + self._config: Config = config or Config() + self._backend = APIBackendBuilder.build(self._config) + + def __getattr__(self, name: str) -> Any: + """ + Delegate attribute access to the underlying backend. + Called only if attribute is not found on RuntimeBackend. + """ + return getattr(self._backend, name) + + @classmethod + def get_instance(cls) -> APIBackend: + if cls._instance is None: + cls._instance = cls() + return cls._instance + + @classmethod + def get_config(cls) -> Config: + return deepcopy(cls.get_instance()._config) + + @classmethod + def set_config(cls, config: Config) -> None: + instance = cls.get_instance() + instance._config = config + instance._backend = APIBackendBuilder.build(config) + + @classmethod + def get_config_value(cls, key: str) -> Config: + keys = key.split(".") + config_value = cls.get_instance()._config + for k in keys: + if isinstance(config_value, dict): + config_value = config_value[k] + else: + config_value = getattr(config_value, k) + return deepcopy(config_value) + + @classmethod + def set_config_value(cls, key: str, value: Any) -> None: + keys = key.split(".") + config = cls.get_instance()._config + parent = config + for k in keys[:-1]: + parent = parent[k] if isinstance(parent, dict) else getattr(parent, k) + if isinstance(parent, dict): + parent[keys[-1]] = value + else: + setattr(parent, keys[-1], value) + cls.set_config(config) From e2d059b110da6d6b1355773b5b1b35689e977dca Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 12:05:33 +0500 Subject: [PATCH 42/54] move enums --- openml/_api/clients/http.py | 2 +- openml/_api/resources/_registry.py | 2 +- openml/_api/resources/base/base.py | 2 +- openml/_api/resources/base/resources.py | 2 +- openml/_api/resources/base/versions.py | 2 +- openml/_api/setup/builder.py | 2 +- openml/_api/setup/config.py | 2 +- openml/{_api/resources/base => }/enums.py | 6 ++++++ openml/testing.py | 2 +- tests/test_api/test_versions.py | 2 +- 10 files changed, 15 insertions(+), 9 deletions(-) rename openml/{_api/resources/base => }/enums.py (76%) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index f700c108a..353cd5e9e 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -16,7 +16,7 @@ from requests import Response from openml.__version__ import __version__ -from openml._api.resources.base.enums import RetryPolicy +from openml.enums import RetryPolicy from openml.exceptions import ( OpenMLNotAuthorizedError, OpenMLServerError, diff --git a/openml/_api/resources/_registry.py b/openml/_api/resources/_registry.py index e8746f481..b1a5f2b74 100644 --- a/openml/_api/resources/_registry.py +++ b/openml/_api/resources/_registry.py @@ -2,7 +2,6 @@ from typing import TYPE_CHECKING -from openml._api.resources.base.enums import APIVersion, ResourceType from openml._api.resources.dataset import DatasetV1API, DatasetV2API from openml._api.resources.estimation_procedure import ( EstimationProcedureV1API, @@ -15,6 +14,7 @@ from openml._api.resources.setup import SetupV1API, SetupV2API from openml._api.resources.study import StudyV1API, StudyV2API from openml._api.resources.task import TaskV1API, TaskV2API +from openml.enums import APIVersion, ResourceType if TYPE_CHECKING: from 
openml._api.resources.base import ResourceAPI diff --git a/openml/_api/resources/base/base.py b/openml/_api/resources/base/base.py index 6a47f83f4..5eadc4932 100644 --- a/openml/_api/resources/base/base.py +++ b/openml/_api/resources/base/base.py @@ -10,7 +10,7 @@ from typing import Any from openml._api.clients import HTTPClient, MinIOClient - from openml._api.resources.base.enums import APIVersion, ResourceType + from openml.enums import APIVersion, ResourceType class ResourceAPI(ABC): diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index 270472029..5c4dde9de 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -1,7 +1,7 @@ from __future__ import annotations from openml._api.resources.base import ResourceAPI -from openml._api.resources.base.enums import ResourceType +from openml.enums import ResourceType class DatasetAPI(ResourceAPI): diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index f8b21a469..a98a0ad43 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -6,7 +6,7 @@ import xmltodict from openml._api.resources.base import ResourceAPI -from openml._api.resources.base.enums import APIVersion, ResourceType +from openml.enums import APIVersion, ResourceType from openml.exceptions import ( OpenMLNotAuthorizedError, OpenMLServerError, diff --git a/openml/_api/setup/builder.py b/openml/_api/setup/builder.py index 4f4b843d7..135b18da3 100644 --- a/openml/_api/setup/builder.py +++ b/openml/_api/setup/builder.py @@ -9,8 +9,8 @@ if TYPE_CHECKING: from openml._api.resources.base import ResourceAPI - from openml._api.resources.base.enums import ResourceType from openml._api.setup.config import Config + from openml.enums import ResourceType class APIBackendBuilder: diff --git a/openml/_api/setup/config.py b/openml/_api/setup/config.py index 0f783a23e..64e790404 100644 --- a/openml/_api/setup/config.py +++ b/openml/_api/setup/config.py @@ -2,8 +2,8 @@ from dataclasses import dataclass, field -from openml._api.resources.base.enums import APIVersion, RetryPolicy from openml._api.setup.utils import _resolve_default_cache_dir +from openml.enums import APIVersion, RetryPolicy @dataclass diff --git a/openml/_api/resources/base/enums.py b/openml/enums.py similarity index 76% rename from openml/_api/resources/base/enums.py rename to openml/enums.py index 13201b3ec..f5a4381b7 100644 --- a/openml/_api/resources/base/enums.py +++ b/openml/enums.py @@ -4,11 +4,15 @@ class APIVersion(str, Enum): + """Supported OpenML API versions.""" + V1 = "v1" V2 = "v2" class ResourceType(str, Enum): + """Canonical resource types exposed by the OpenML API.""" + DATASET = "dataset" TASK = "task" TASK_TYPE = "task_type" @@ -23,5 +27,7 @@ class ResourceType(str, Enum): class RetryPolicy(str, Enum): + """Retry behavior for failed API requests.""" + HUMAN = "human" ROBOT = "robot" diff --git a/openml/testing.py b/openml/testing.py index 18e03fb86..3ca2d1b76 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -17,7 +17,7 @@ import openml from openml._api.clients import HTTPCache, HTTPClient -from openml._api.resources.base.enums import RetryPolicy +from openml.enums import RetryPolicy from openml.exceptions import OpenMLServerException from openml.tasks import TaskType diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index fd41feb2a..a7451f3ae 100644 --- a/tests/test_api/test_versions.py +++ 
b/tests/test_api/test_versions.py @@ -1,7 +1,7 @@ import pytest from openml.testing import TestAPIBase from openml._api.resources.base.versions import ResourceV1API -from openml._api.resources.base.enums import ResourceType +from openml.enums import ResourceType class TestResourceV1API(TestAPIBase): From d156ad4e6f1c1d2488242419baf20f5e5fa0e219 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 12:21:17 +0500 Subject: [PATCH 43/54] module level imports --- openml/_api/__init__.py | 69 +++++++++++++++++++ openml/_api/resources/__init__.py | 23 ++++--- openml/_api/resources/_registry.py | 23 ++++--- openml/_api/resources/base/__init__.py | 8 +-- openml/_api/resources/base/resources.py | 3 +- openml/_api/resources/base/versions.py | 3 +- openml/_api/resources/dataset.py | 2 +- openml/_api/resources/estimation_procedure.py | 2 +- openml/_api/resources/evaluation.py | 2 +- openml/_api/resources/evaluation_measure.py | 2 +- openml/_api/resources/flow.py | 2 +- openml/_api/resources/run.py | 2 +- openml/_api/resources/setup.py | 2 +- openml/_api/resources/study.py | 2 +- openml/_api/resources/task.py | 2 +- openml/_api/setup/__init__.py | 12 ++++ openml/_api/setup/_instance.py | 2 +- openml/_api/setup/backend.py | 4 +- openml/_api/setup/builder.py | 6 +- openml/_api/setup/config.py | 3 +- openml/_api/setup/utils.py | 49 ------------- 21 files changed, 130 insertions(+), 93 deletions(-) delete mode 100644 openml/_api/setup/utils.py diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py index e69de29bb..25bc2f262 100644 --- a/openml/_api/__init__.py +++ b/openml/_api/__init__.py @@ -0,0 +1,69 @@ +from .clients import ( + HTTPCache, + HTTPClient, + MinIOClient, +) +from .resources import ( + API_REGISTRY, + DatasetV1API, + DatasetV2API, + EstimationProcedureV1API, + EstimationProcedureV2API, + EvaluationMeasureV1API, + EvaluationMeasureV2API, + EvaluationV1API, + EvaluationV2API, + FallbackProxy, + FlowV1API, + FlowV2API, + ResourceAPI, + RunV1API, + RunV2API, + SetupV1API, + SetupV2API, + StudyV1API, + StudyV2API, + TaskV1API, + TaskV2API, +) +from .setup import ( + APIBackend, + APIBackendBuilder, + APIConfig, + CacheConfig, + Config, + ConnectionConfig, +) + +__all__ = [ + "API_REGISTRY", + "APIBackend", + "APIBackendBuilder", + "APIConfig", + "CacheConfig", + "Config", + "ConnectionConfig", + "DatasetV1API", + "DatasetV2API", + "EstimationProcedureV1API", + "EstimationProcedureV2API", + "EvaluationMeasureV1API", + "EvaluationMeasureV2API", + "EvaluationV1API", + "EvaluationV2API", + "FallbackProxy", + "FlowV1API", + "FlowV2API", + "HTTPCache", + "HTTPClient", + "MinIOClient", + "ResourceAPI", + "RunV1API", + "RunV2API", + "SetupV1API", + "SetupV2API", + "StudyV1API", + "StudyV2API", + "TaskV1API", + "TaskV2API", +] diff --git a/openml/_api/resources/__init__.py b/openml/_api/resources/__init__.py index a3dc63798..863ec0f72 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,17 +1,17 @@ -from openml._api.resources._registry import API_REGISTRY -from openml._api.resources.base.fallback import FallbackProxy -from openml._api.resources.dataset import DatasetV1API, DatasetV2API -from openml._api.resources.estimation_procedure import ( +from ._registry import API_REGISTRY +from .base import FallbackProxy, ResourceAPI +from .dataset import DatasetV1API, DatasetV2API +from .estimation_procedure import ( EstimationProcedureV1API, EstimationProcedureV2API, ) -from openml._api.resources.evaluation import EvaluationV1API, EvaluationV2API -from 
openml._api.resources.evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API -from openml._api.resources.flow import FlowV1API, FlowV2API -from openml._api.resources.run import RunV1API, RunV2API -from openml._api.resources.setup import SetupV1API, SetupV2API -from openml._api.resources.study import StudyV1API, StudyV2API -from openml._api.resources.task import TaskV1API, TaskV2API +from .evaluation import EvaluationV1API, EvaluationV2API +from .evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API +from .flow import FlowV1API, FlowV2API +from .run import RunV1API, RunV2API +from .setup import SetupV1API, SetupV2API +from .study import StudyV1API, StudyV2API +from .task import TaskV1API, TaskV2API __all__ = [ "API_REGISTRY", @@ -26,6 +26,7 @@ "FallbackProxy", "FlowV1API", "FlowV2API", + "ResourceAPI", "RunV1API", "RunV2API", "SetupV1API", diff --git a/openml/_api/resources/_registry.py b/openml/_api/resources/_registry.py index b1a5f2b74..66d7ec428 100644 --- a/openml/_api/resources/_registry.py +++ b/openml/_api/resources/_registry.py @@ -2,22 +2,23 @@ from typing import TYPE_CHECKING -from openml._api.resources.dataset import DatasetV1API, DatasetV2API -from openml._api.resources.estimation_procedure import ( +from openml.enums import APIVersion, ResourceType + +from .dataset import DatasetV1API, DatasetV2API +from .estimation_procedure import ( EstimationProcedureV1API, EstimationProcedureV2API, ) -from openml._api.resources.evaluation import EvaluationV1API, EvaluationV2API -from openml._api.resources.evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API -from openml._api.resources.flow import FlowV1API, FlowV2API -from openml._api.resources.run import RunV1API, RunV2API -from openml._api.resources.setup import SetupV1API, SetupV2API -from openml._api.resources.study import StudyV1API, StudyV2API -from openml._api.resources.task import TaskV1API, TaskV2API -from openml.enums import APIVersion, ResourceType +from .evaluation import EvaluationV1API, EvaluationV2API +from .evaluation_measure import EvaluationMeasureV1API, EvaluationMeasureV2API +from .flow import FlowV1API, FlowV2API +from .run import RunV1API, RunV2API +from .setup import SetupV1API, SetupV2API +from .study import StudyV1API, StudyV2API +from .task import TaskV1API, TaskV2API if TYPE_CHECKING: - from openml._api.resources.base import ResourceAPI + from .base import ResourceAPI API_REGISTRY: dict[ APIVersion, diff --git a/openml/_api/resources/base/__init__.py b/openml/_api/resources/base/__init__.py index f222a0b87..ed6dc26f7 100644 --- a/openml/_api/resources/base/__init__.py +++ b/openml/_api/resources/base/__init__.py @@ -1,6 +1,6 @@ -from openml._api.resources.base.base import ResourceAPI -from openml._api.resources.base.fallback import FallbackProxy -from openml._api.resources.base.resources import ( +from .base import ResourceAPI +from .fallback import FallbackProxy +from .resources import ( DatasetAPI, EstimationProcedureAPI, EvaluationAPI, @@ -11,7 +11,7 @@ StudyAPI, TaskAPI, ) -from openml._api.resources.base.versions import ResourceV1API, ResourceV2API +from .versions import ResourceV1API, ResourceV2API __all__ = [ "DatasetAPI", diff --git a/openml/_api/resources/base/resources.py b/openml/_api/resources/base/resources.py index 5c4dde9de..8ccd5776e 100644 --- a/openml/_api/resources/base/resources.py +++ b/openml/_api/resources/base/resources.py @@ -1,8 +1,9 @@ from __future__ import annotations -from openml._api.resources.base import ResourceAPI from 
openml.enums import ResourceType +from .base import ResourceAPI + class DatasetAPI(ResourceAPI): resource_type: ResourceType = ResourceType.DATASET diff --git a/openml/_api/resources/base/versions.py b/openml/_api/resources/base/versions.py index a98a0ad43..b86272377 100644 --- a/openml/_api/resources/base/versions.py +++ b/openml/_api/resources/base/versions.py @@ -5,7 +5,6 @@ import xmltodict -from openml._api.resources.base import ResourceAPI from openml.enums import APIVersion, ResourceType from openml.exceptions import ( OpenMLNotAuthorizedError, @@ -13,6 +12,8 @@ OpenMLServerException, ) +from .base import ResourceAPI + class ResourceV1API(ResourceAPI): api_version: APIVersion = APIVersion.V1 diff --git a/openml/_api/resources/dataset.py b/openml/_api/resources/dataset.py index 3ecad35da..51688a2fd 100644 --- a/openml/_api/resources/dataset.py +++ b/openml/_api/resources/dataset.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import DatasetAPI, ResourceV1API, ResourceV2API +from .base import DatasetAPI, ResourceV1API, ResourceV2API class DatasetV1API(ResourceV1API, DatasetAPI): diff --git a/openml/_api/resources/estimation_procedure.py b/openml/_api/resources/estimation_procedure.py index d2e73cfa6..b8ea7d2c3 100644 --- a/openml/_api/resources/estimation_procedure.py +++ b/openml/_api/resources/estimation_procedure.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import EstimationProcedureAPI, ResourceV1API, ResourceV2API +from .base import EstimationProcedureAPI, ResourceV1API, ResourceV2API class EstimationProcedureV1API(ResourceV1API, EstimationProcedureAPI): diff --git a/openml/_api/resources/evaluation.py b/openml/_api/resources/evaluation.py index a0149e1e5..07877e14e 100644 --- a/openml/_api/resources/evaluation.py +++ b/openml/_api/resources/evaluation.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import EvaluationAPI, ResourceV1API, ResourceV2API +from .base import EvaluationAPI, ResourceV1API, ResourceV2API class EvaluationV1API(ResourceV1API, EvaluationAPI): diff --git a/openml/_api/resources/evaluation_measure.py b/openml/_api/resources/evaluation_measure.py index bd4318417..63cf16c77 100644 --- a/openml/_api/resources/evaluation_measure.py +++ b/openml/_api/resources/evaluation_measure.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import EvaluationMeasureAPI, ResourceV1API, ResourceV2API +from .base import EvaluationMeasureAPI, ResourceV1API, ResourceV2API class EvaluationMeasureV1API(ResourceV1API, EvaluationMeasureAPI): diff --git a/openml/_api/resources/flow.py b/openml/_api/resources/flow.py index 3b62abd3f..ad2e05bd9 100644 --- a/openml/_api/resources/flow.py +++ b/openml/_api/resources/flow.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import FlowAPI, ResourceV1API, ResourceV2API +from .base import FlowAPI, ResourceV1API, ResourceV2API class FlowV1API(ResourceV1API, FlowAPI): diff --git a/openml/_api/resources/run.py b/openml/_api/resources/run.py index 9698c59dd..151c69e35 100644 --- a/openml/_api/resources/run.py +++ b/openml/_api/resources/run.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import ResourceV1API, ResourceV2API, RunAPI +from .base import ResourceV1API, ResourceV2API, RunAPI class RunV1API(ResourceV1API, RunAPI): diff --git a/openml/_api/resources/setup.py b/openml/_api/resources/setup.py index 
e948e1b38..78a36cecc 100644 --- a/openml/_api/resources/setup.py +++ b/openml/_api/resources/setup.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import ResourceV1API, ResourceV2API, SetupAPI +from .base import ResourceV1API, ResourceV2API, SetupAPI class SetupV1API(ResourceV1API, SetupAPI): diff --git a/openml/_api/resources/study.py b/openml/_api/resources/study.py index 8de5868d1..cefd55004 100644 --- a/openml/_api/resources/study.py +++ b/openml/_api/resources/study.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import ResourceV1API, ResourceV2API, StudyAPI +from .base import ResourceV1API, ResourceV2API, StudyAPI class StudyV1API(ResourceV1API, StudyAPI): diff --git a/openml/_api/resources/task.py b/openml/_api/resources/task.py index a97d5f726..a367c9aa1 100644 --- a/openml/_api/resources/task.py +++ b/openml/_api/resources/task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from openml._api.resources.base import ResourceV1API, ResourceV2API, TaskAPI +from .base import ResourceV1API, ResourceV2API, TaskAPI class TaskV1API(ResourceV1API, TaskAPI): diff --git a/openml/_api/setup/__init__.py b/openml/_api/setup/__init__.py index e69de29bb..7f8c65ba3 100644 --- a/openml/_api/setup/__init__.py +++ b/openml/_api/setup/__init__.py @@ -0,0 +1,12 @@ +from .backend import APIBackend +from .builder import APIBackendBuilder +from .config import APIConfig, CacheConfig, Config, ConnectionConfig + +__all__ = [ + "APIBackend", + "APIBackendBuilder", + "APIConfig", + "CacheConfig", + "Config", + "ConnectionConfig", +] diff --git a/openml/_api/setup/_instance.py b/openml/_api/setup/_instance.py index 2d9818a0d..c98ccaf57 100644 --- a/openml/_api/setup/_instance.py +++ b/openml/_api/setup/_instance.py @@ -1,5 +1,5 @@ from __future__ import annotations -from openml._api.setup.backend import APIBackend +from .backend import APIBackend _backend = APIBackend.get_instance() diff --git a/openml/_api/setup/backend.py b/openml/_api/setup/backend.py index 7c300e143..f0faf5165 100644 --- a/openml/_api/setup/backend.py +++ b/openml/_api/setup/backend.py @@ -3,8 +3,8 @@ from copy import deepcopy from typing import Any -from openml._api.setup.builder import APIBackendBuilder -from openml._api.setup.config import Config +from .builder import APIBackendBuilder +from .config import Config class APIBackend: diff --git a/openml/_api/setup/builder.py b/openml/_api/setup/builder.py index 135b18da3..750db431a 100644 --- a/openml/_api/setup/builder.py +++ b/openml/_api/setup/builder.py @@ -5,13 +5,13 @@ from typing import TYPE_CHECKING from openml._api.clients import HTTPCache, HTTPClient, MinIOClient -from openml._api.resources import API_REGISTRY, FallbackProxy +from openml._api.resources import API_REGISTRY, FallbackProxy, ResourceAPI if TYPE_CHECKING: - from openml._api.resources.base import ResourceAPI - from openml._api.setup.config import Config from openml.enums import ResourceType + from .config import Config + class APIBackendBuilder: def __init__( diff --git a/openml/_api/setup/config.py b/openml/_api/setup/config.py index 64e790404..ea868262a 100644 --- a/openml/_api/setup/config.py +++ b/openml/_api/setup/config.py @@ -2,9 +2,10 @@ from dataclasses import dataclass, field -from openml._api.setup.utils import _resolve_default_cache_dir from openml.enums import APIVersion, RetryPolicy +from ._utils import _resolve_default_cache_dir + @dataclass class APIConfig: diff --git a/openml/_api/setup/utils.py 
b/openml/_api/setup/utils.py deleted file mode 100644 index ddcf5b41c..000000000 --- a/openml/_api/setup/utils.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import annotations - -import logging -import os -import platform -from pathlib import Path - -openml_logger = logging.getLogger("openml") - -# Default values (see also https://github.com/openml/OpenML/wiki/Client-API-Standards) -_user_path = Path("~").expanduser().absolute() - - -def _resolve_default_cache_dir() -> Path: - user_defined_cache_dir = os.environ.get("OPENML_CACHE_DIR") - if user_defined_cache_dir is not None: - return Path(user_defined_cache_dir) - - if platform.system().lower() != "linux": - return _user_path / ".openml" - - xdg_cache_home = os.environ.get("XDG_CACHE_HOME") - if xdg_cache_home is None: - return Path("~", ".cache", "openml") - - # This is the proper XDG_CACHE_HOME directory, but - # we unfortunately had a problem where we used XDG_CACHE_HOME/org, - # we check heuristically if this old directory still exists and issue - # a warning if it does. There's too much data to move to do this for the user. - - # The new cache directory exists - cache_dir = Path(xdg_cache_home) / "openml" - if cache_dir.exists(): - return cache_dir - - # The old cache directory *does not* exist - heuristic_dir_for_backwards_compat = Path(xdg_cache_home) / "org" / "openml" - if not heuristic_dir_for_backwards_compat.exists(): - return cache_dir - - root_dir_to_delete = Path(xdg_cache_home) / "org" - openml_logger.warning( - "An old cache directory was found at '%s'. This directory is no longer used by " - "OpenML-Python. To silence this warning you would need to delete the old cache " - "directory. The cached files will then be located in '%s'.", - root_dir_to_delete, - cache_dir, - ) - return Path(xdg_cache_home) From d7a37884cc18fee1509cd43fcec696dd0efbf466 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 12:24:43 +0500 Subject: [PATCH 44/54] module level import for _backend --- openml/__init__.py | 2 +- openml/_api/__init__.py | 2 ++ openml/_api/setup/__init__.py | 2 ++ openml/_api/setup/_utils.py | 49 +++++++++++++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 openml/_api/setup/_utils.py diff --git a/openml/__init__.py b/openml/__init__.py index fdf3b90e4..21dda24ad 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -33,7 +33,7 @@ utils, ) from .__version__ import __version__ -from ._api.setup._instance import _backend +from ._api import _backend from .datasets import OpenMLDataFeature, OpenMLDataset from .evaluations import OpenMLEvaluation from .flows import OpenMLFlow diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py index 25bc2f262..2d4651431 100644 --- a/openml/_api/__init__.py +++ b/openml/_api/__init__.py @@ -33,6 +33,7 @@ CacheConfig, Config, ConnectionConfig, + _backend, ) __all__ = [ @@ -66,4 +67,5 @@ "StudyV2API", "TaskV1API", "TaskV2API", + "_backend", ] diff --git a/openml/_api/setup/__init__.py b/openml/_api/setup/__init__.py index 7f8c65ba3..1c28cfa9e 100644 --- a/openml/_api/setup/__init__.py +++ b/openml/_api/setup/__init__.py @@ -1,3 +1,4 @@ +from ._instance import _backend from .backend import APIBackend from .builder import APIBackendBuilder from .config import APIConfig, CacheConfig, Config, ConnectionConfig @@ -9,4 +10,5 @@ "CacheConfig", "Config", "ConnectionConfig", + "_backend", ] diff --git a/openml/_api/setup/_utils.py b/openml/_api/setup/_utils.py new file mode 100644 index 000000000..ddcf5b41c --- /dev/null +++ 
b/openml/_api/setup/_utils.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +import logging +import os +import platform +from pathlib import Path + +openml_logger = logging.getLogger("openml") + +# Default values (see also https://github.com/openml/OpenML/wiki/Client-API-Standards) +_user_path = Path("~").expanduser().absolute() + + +def _resolve_default_cache_dir() -> Path: + user_defined_cache_dir = os.environ.get("OPENML_CACHE_DIR") + if user_defined_cache_dir is not None: + return Path(user_defined_cache_dir) + + if platform.system().lower() != "linux": + return _user_path / ".openml" + + xdg_cache_home = os.environ.get("XDG_CACHE_HOME") + if xdg_cache_home is None: + return Path("~", ".cache", "openml") + + # This is the proper XDG_CACHE_HOME directory, but + # we unfortunately had a problem where we used XDG_CACHE_HOME/org, + # we check heuristically if this old directory still exists and issue + # a warning if it does. There's too much data to move to do this for the user. + + # The new cache directory exists + cache_dir = Path(xdg_cache_home) / "openml" + if cache_dir.exists(): + return cache_dir + + # The old cache directory *does not* exist + heuristic_dir_for_backwards_compat = Path(xdg_cache_home) / "org" / "openml" + if not heuristic_dir_for_backwards_compat.exists(): + return cache_dir + + root_dir_to_delete = Path(xdg_cache_home) / "org" + openml_logger.warning( + "An old cache directory was found at '%s'. This directory is no longer used by " + "OpenML-Python. To silence this warning you would need to delete the old cache " + "directory. The cached files will then be located in '%s'.", + root_dir_to_delete, + cache_dir, + ) + return Path(xdg_cache_home) From b5b9ef60047cff083e30ab7eb6cb66f02baa1ff6 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 12:29:12 +0500 Subject: [PATCH 45/54] module level import for tests --- openml/_api/__init__.py | 24 ++++++++++++++++++++++++ openml/_api/resources/__init__.py | 29 ++++++++++++++++++++++++++++- openml/testing.py | 2 +- tests/test_api/test_versions.py | 2 +- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/openml/_api/__init__.py b/openml/_api/__init__.py index 2d4651431..926fee3d4 100644 --- a/openml/_api/__init__.py +++ b/openml/_api/__init__.py @@ -5,24 +5,35 @@ ) from .resources import ( API_REGISTRY, + DatasetAPI, DatasetV1API, DatasetV2API, + EstimationProcedureAPI, EstimationProcedureV1API, EstimationProcedureV2API, + EvaluationAPI, + EvaluationMeasureAPI, EvaluationMeasureV1API, EvaluationMeasureV2API, EvaluationV1API, EvaluationV2API, FallbackProxy, + FlowAPI, FlowV1API, FlowV2API, ResourceAPI, + ResourceV1API, + ResourceV2API, + RunAPI, RunV1API, RunV2API, + SetupAPI, SetupV1API, SetupV2API, + StudyAPI, StudyV1API, StudyV2API, + TaskAPI, TaskV1API, TaskV2API, ) @@ -44,27 +55,40 @@ "CacheConfig", "Config", "ConnectionConfig", + "DatasetAPI", "DatasetV1API", "DatasetV2API", + "EstimationProcedureAPI", "EstimationProcedureV1API", "EstimationProcedureV2API", + "EvaluationAPI", + "EvaluationMeasureAPI", "EvaluationMeasureV1API", "EvaluationMeasureV2API", "EvaluationV1API", "EvaluationV2API", "FallbackProxy", + "FallbackProxy", + "FlowAPI", "FlowV1API", "FlowV2API", "HTTPCache", "HTTPClient", "MinIOClient", "ResourceAPI", + "ResourceAPI", + "ResourceV1API", + "ResourceV2API", + "RunAPI", "RunV1API", "RunV2API", + "SetupAPI", "SetupV1API", "SetupV2API", + "StudyAPI", "StudyV1API", "StudyV2API", + "TaskAPI", "TaskV1API", "TaskV2API", "_backend", diff --git a/openml/_api/resources/__init__.py 
b/openml/_api/resources/__init__.py index 863ec0f72..1f0b2caa1 100644 --- a/openml/_api/resources/__init__.py +++ b/openml/_api/resources/__init__.py @@ -1,5 +1,19 @@ from ._registry import API_REGISTRY -from .base import FallbackProxy, ResourceAPI +from .base import ( + DatasetAPI, + EstimationProcedureAPI, + EvaluationAPI, + EvaluationMeasureAPI, + FallbackProxy, + FlowAPI, + ResourceAPI, + ResourceV1API, + ResourceV2API, + RunAPI, + SetupAPI, + StudyAPI, + TaskAPI, +) from .dataset import DatasetV1API, DatasetV2API from .estimation_procedure import ( EstimationProcedureV1API, @@ -15,24 +29,37 @@ __all__ = [ "API_REGISTRY", + "DatasetAPI", "DatasetV1API", "DatasetV2API", + "EstimationProcedureAPI", "EstimationProcedureV1API", "EstimationProcedureV2API", + "EvaluationAPI", + "EvaluationMeasureAPI", "EvaluationMeasureV1API", "EvaluationMeasureV2API", "EvaluationV1API", "EvaluationV2API", "FallbackProxy", + "FallbackProxy", + "FlowAPI", "FlowV1API", "FlowV2API", "ResourceAPI", + "ResourceAPI", + "ResourceV1API", + "ResourceV2API", + "RunAPI", "RunV1API", "RunV2API", + "SetupAPI", "SetupV1API", "SetupV2API", + "StudyAPI", "StudyV1API", "StudyV2API", + "TaskAPI", "TaskV1API", "TaskV2API", ] diff --git a/openml/testing.py b/openml/testing.py index 3ca2d1b76..a971aa1c3 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -16,7 +16,7 @@ import requests import openml -from openml._api.clients import HTTPCache, HTTPClient +from openml._api import HTTPCache, HTTPClient from openml.enums import RetryPolicy from openml.exceptions import OpenMLServerException from openml.tasks import TaskType diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index a7451f3ae..2507a3cd5 100644 --- a/tests/test_api/test_versions.py +++ b/tests/test_api/test_versions.py @@ -1,6 +1,6 @@ import pytest from openml.testing import TestAPIBase -from openml._api.resources.base.versions import ResourceV1API +from openml._api import ResourceV1API from openml.enums import ResourceType From 567eca4096d1332d1db07f8646a3733c241885f3 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Mon, 2 Feb 2026 13:00:38 +0500 Subject: [PATCH 46/54] add test: test_tag_and_untag --- tests/test_api/test_versions.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/test_api/test_versions.py b/tests/test_api/test_versions.py index 2507a3cd5..6a4cad97d 100644 --- a/tests/test_api/test_versions.py +++ b/tests/test_api/test_versions.py @@ -1,3 +1,4 @@ +from time import time import pytest from openml.testing import TestAPIBase from openml._api import ResourceV1API @@ -41,4 +42,12 @@ def test_publish_and_delete(self): @pytest.mark.uses_test_server() def test_tag_and_untag(self): - pass + resource_id = 1 + unique_indicator = str(time()).replace(".", "") + tag = f"TestResourceV1API_test_tag_and_untag_{unique_indicator}" + + tags = self.resource.tag(resource_id, tag) + self.assertIn(tag, tags) + + tags = self.resource.untag(resource_id, tag) + self.assertNotIn(tag, tags) From b2287c32f5637a755f6b2e95c5472308969ef252 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 10:06:20 +0500 Subject: [PATCH 47/54] implement get/set_config_values --- openml/_api/setup/backend.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/openml/_api/setup/backend.py b/openml/_api/setup/backend.py index f0faf5165..d8cf83f03 100644 --- a/openml/_api/setup/backend.py +++ b/openml/_api/setup/backend.py @@ -38,7 +38,7 @@ def set_config(cls, config: Config) -> None: instance._backend 
= APIBackendBuilder.build(config) @classmethod - def get_config_value(cls, key: str) -> Config: + def get_config_value(cls, key: str) -> Any: keys = key.split(".") config_value = cls.get_instance()._config for k in keys: @@ -60,3 +60,16 @@ def set_config_value(cls, key: str, value: Any) -> None: else: setattr(parent, keys[-1], value) cls.set_config(config) + + @classmethod + def get_config_values(cls, keys: list[str]) -> list[Any]: + values = [] + for key in keys: + value = cls.get_config_value(key) + values.append(value) + return values + + @classmethod + def set_config_values(cls, config_dict: dict[str, Any]) -> None: + for key, value in config_dict.items(): + cls.set_config_value(key, value) From b7e285eaafadabe88b7d4e0f42edc1f72459a2ee Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:22:36 +0500 Subject: [PATCH 48/54] improve APIBackend.set_config_values --- openml/_api/setup/backend.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/openml/_api/setup/backend.py b/openml/_api/setup/backend.py index d8cf83f03..4dd0f4390 100644 --- a/openml/_api/setup/backend.py +++ b/openml/_api/setup/backend.py @@ -71,5 +71,16 @@ def get_config_values(cls, keys: list[str]) -> list[Any]: @classmethod def set_config_values(cls, config_dict: dict[str, Any]) -> None: + config = cls.get_instance()._config + for key, value in config_dict.items(): - cls.set_config_value(key, value) + keys = key.split(".") + parent = config + for k in keys[:-1]: + parent = parent[k] if isinstance(parent, dict) else getattr(parent, k) + if isinstance(parent, dict): + parent[keys[-1]] = value + else: + setattr(parent, keys[-1], value) + + cls.set_config(config) From fd43c489523c1a95e84bc2a95bf2caedd44262c2 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:24:24 +0500 Subject: [PATCH 49/54] use LegacyConfig --- openml/__init__.py | 7 +++- openml/_api_calls.py | 19 +++++----- openml/{config.py => _config.py} | 36 +++++++++++++++++++ openml/_legacy_config.py | 19 ++++++++++ openml/base.py | 2 +- openml/cli.py | 14 ++++---- openml/datasets/dataset.py | 6 ++-- openml/datasets/functions.py | 6 ++-- openml/evaluations/evaluation.py | 1 - openml/runs/functions.py | 18 +++++----- openml/setups/functions.py | 5 ++- openml/setups/setup.py | 1 - openml/study/functions.py | 2 +- openml/study/study.py | 4 +-- openml/tasks/task.py | 2 +- openml/utils.py | 6 ++-- .../test_evaluations_example.py | 5 ++- tests/test_openml/test_api_calls.py | 1 - tests/test_openml/test_config.py | 2 +- 19 files changed, 106 insertions(+), 50 deletions(-) rename openml/{config.py => _config.py} (95%) create mode 100644 openml/_legacy_config.py diff --git a/openml/__init__.py b/openml/__init__.py index 21dda24ad..30f38f5f0 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -20,7 +20,8 @@ from . import ( _api_calls, - config, + _config, + _legacy_config, datasets, evaluations, exceptions, @@ -50,6 +51,8 @@ OpenMLTask, ) +config = _legacy_config.LegacyConfig + def populate_cache( task_ids: list[int] | None = None, @@ -111,6 +114,8 @@ def populate_cache( "__version__", "_api_calls", "_backend", + "_config", + "_legacy_config", "config", "datasets", "evaluations", diff --git a/openml/_api_calls.py b/openml/_api_calls.py index 9e53bd9fa..21d5c4391 100644 --- a/openml/_api_calls.py +++ b/openml/_api_calls.py @@ -19,7 +19,8 @@ import xmltodict from urllib3 import ProxyManager -from . 
import config +import openml + from .__version__ import __version__ from .exceptions import ( OpenMLHashException, @@ -70,7 +71,7 @@ def resolve_env_proxies(url: str) -> str | None: def _create_url_from_endpoint(endpoint: str) -> str: - url = config.server + url: str = openml.config.server if not url.endswith("/"): url += "/" url += endpoint @@ -171,7 +172,7 @@ def _download_minio_file( bucket_name=bucket, object_name=object_name, file_path=str(destination), - progress=ProgressBar() if config.show_progress else None, + progress=ProgressBar() if openml.config.show_progress else None, request_headers=_HEADERS, ) if destination.is_file() and destination.suffix == ".zip": @@ -300,7 +301,7 @@ def _file_id_to_url(file_id: int, filename: str | None = None) -> str: Presents the URL how to download a given file id filename is optional """ - openml_url = config.server.split("/api/") + openml_url: str = openml.config.server.split("/api/") url = openml_url[0] + f"/data/download/{file_id!s}" if filename is not None: url += "/" + filename @@ -316,7 +317,7 @@ def _read_url_files( and sending file_elements as files """ data = {} if data is None else data - data["api_key"] = config.apikey + data["api_key"] = openml.config.apikey if file_elements is None: file_elements = {} # Using requests.post sets header 'Accept-encoding' automatically to @@ -336,8 +337,8 @@ def __read_url( md5_checksum: str | None = None, ) -> requests.Response: data = {} if data is None else data - if config.apikey: - data["api_key"] = config.apikey + if openml.config.apikey: + data["api_key"] = openml.config.apikey return _send_request( request_method=request_method, url=url, @@ -362,10 +363,10 @@ def _send_request( # noqa: C901, PLR0912 files: FILE_ELEMENTS_TYPE | None = None, md5_checksum: str | None = None, ) -> requests.Response: - n_retries = max(1, config.connection_n_retries) + n_retries = max(1, openml.config.connection_n_retries) response: requests.Response | None = None - delay_method = _human_delay if config.retry_policy == "human" else _robot_delay + delay_method = _human_delay if openml.config.retry_policy == "human" else _robot_delay # Error to raise in case of retrying too often. Will be set to the last observed exception. 
retry_raise_e: Exception | None = None diff --git a/openml/config.py b/openml/_config.py similarity index 95% rename from openml/config.py rename to openml/_config.py index e6104fd7f..c266ae9d9 100644 --- a/openml/config.py +++ b/openml/_config.py @@ -18,6 +18,8 @@ from typing_extensions import TypedDict from urllib.parse import urlparse +from openml.enums import RetryPolicy + logger = logging.getLogger(__name__) openml_logger = logging.getLogger("openml") console_handler: logging.StreamHandler | None = None @@ -206,6 +208,8 @@ def set_retry_policy(value: Literal["human", "robot"], n_retries: int | None = N retry_policy = value connection_n_retries = default_retries_by_policy[value] if n_retries is None else n_retries + _sync_api_config() + class ConfigurationForExamples: """Allows easy switching to and from a test configuration, used for examples.""" @@ -244,6 +248,8 @@ def start_using_configuration_for_example(cls) -> None: stacklevel=2, ) + _sync_api_config() + @classmethod def stop_using_configuration_for_example(cls) -> None: """Return to configuration as it was before `start_use_example_configuration`.""" @@ -262,6 +268,8 @@ def stop_using_configuration_for_example(cls) -> None: apikey = cast("str", cls._last_used_key) cls._start_last_called = False + _sync_api_config() + def _handle_xdg_config_home_backwards_compatibility( xdg_home: str, @@ -374,6 +382,8 @@ def _setup(config: _Config | None = None) -> None: short_cache_dir = Path(config["cachedir"]) _root_cache_directory = short_cache_dir.expanduser().resolve() + _sync_api_config() + try: cache_exists = _root_cache_directory.exists() # create the cache subdirectory @@ -408,6 +418,8 @@ def set_field_in_config_file(field: str, value: Any) -> None: if value is not None: fh.write(f"{f} = {value}\n") + _sync_api_config() + def _parse_config(config_file: str | Path) -> _Config: """Parse the config file, set up defaults.""" @@ -495,6 +507,8 @@ def set_root_cache_directory(root_cache_directory: str | Path) -> None: global _root_cache_directory # noqa: PLW0603 _root_cache_directory = Path(root_cache_directory) + _sync_api_config() + start_using_configuration_for_example = ( ConfigurationForExamples.start_using_configuration_for_example @@ -514,6 +528,28 @@ def overwrite_config_context(config: dict[str, Any]) -> Iterator[_Config]: _setup(existing_config) +def _sync_api_config() -> None: + """Sync the new API config with the legacy config in this file.""" + from ._api import APIBackend + + p = urlparse(server) + v1_server = f"{p.scheme}://{p.netloc}/" + v1_base_url = p.path.lstrip("/") + connection_retry_policy = RetryPolicy.HUMAN if retry_policy == "human" else RetryPolicy.ROBOT + cache_dir = str(_root_cache_directory) + + APIBackend.set_config_values( + { + "api_configs.v1.server": v1_server, + "api_configs.v1.base_url": v1_base_url, + "api_configs.v1.api_key": apikey, + "cache.dir": cache_dir, + "connection.retry_policy": connection_retry_policy, + "connection.retries": connection_n_retries, + } + ) + + __all__ = [ "get_cache_directory", "get_config_as_dict", diff --git a/openml/_legacy_config.py b/openml/_legacy_config.py new file mode 100644 index 000000000..b26b13c01 --- /dev/null +++ b/openml/_legacy_config.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from typing import Any + + +class LegacyConfigMeta(type): + def __getattr__(cls, name: str) -> Any: + import openml + + return getattr(openml._config, name) + + def __setattr__(cls, name: str, value: Any) -> None: + import openml + + setattr(openml._config, name, value) + + 
+class LegacyConfig(metaclass=LegacyConfigMeta): + pass diff --git a/openml/base.py b/openml/base.py index a282be8eb..f79bc2931 100644 --- a/openml/base.py +++ b/openml/base.py @@ -8,8 +8,8 @@ import xmltodict +import openml import openml._api_calls -import openml.config from .utils import _get_rest_api_type_alias, _tag_openml_base diff --git a/openml/cli.py b/openml/cli.py index 0afb089c2..2120449e8 100644 --- a/openml/cli.py +++ b/openml/cli.py @@ -9,7 +9,7 @@ from pathlib import Path from urllib.parse import urlparse -from openml import config +import openml from openml.__version__ import __version__ @@ -59,17 +59,17 @@ def wait_until_valid_input( def print_configuration() -> None: - file = config.determine_config_file_path() + file = openml.config.determine_config_file_path() header = f"File '{file}' contains (or defaults to):" print(header) - max_key_length = max(map(len, config.get_config_as_dict())) - for field, value in config.get_config_as_dict().items(): + max_key_length = max(map(len, openml.config.get_config_as_dict())) + for field, value in openml.config.get_config_as_dict().items(): print(f"{field.ljust(max_key_length)}: {value}") def verbose_set(field: str, value: str) -> None: - config.set_field_in_config_file(field, value) + openml.config.set_field_in_config_file(field, value) print(f"{field} set to '{value}'.") @@ -82,7 +82,7 @@ def check_apikey(apikey: str) -> str: return "" instructions = ( - f"Your current API key is set to: '{config.apikey}'. " + f"Your current API key is set to: '{openml.config.apikey}'. " "You can get an API key at https://new.openml.org. " "You must create an account if you don't have one yet:\n" " 1. Log in with the account.\n" @@ -347,7 +347,7 @@ def main() -> None: "'https://openml.github.io/openml-python/main/usage.html#configuration'.", ) - configurable_fields = [f for f in config._defaults if f not in ["max_retries"]] + configurable_fields = [f for f in openml.config._defaults if f not in ["max_retries"]] parser_configure.add_argument( "field", diff --git a/openml/datasets/dataset.py b/openml/datasets/dataset.py index d9eee278d..59d6205ba 100644 --- a/openml/datasets/dataset.py +++ b/openml/datasets/dataset.py @@ -17,8 +17,8 @@ import scipy.sparse import xmltodict +import openml from openml.base import OpenMLBase -from openml.config import OPENML_SKIP_PARQUET_ENV_VAR from .data_feature import OpenMLDataFeature @@ -375,7 +375,9 @@ def _download_data(self) -> None: # import required here to avoid circular import. 
from .functions import _get_dataset_arff, _get_dataset_parquet - skip_parquet = os.environ.get(OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" + skip_parquet = ( + os.environ.get(openml.config.OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" + ) if self._parquet_url is not None and not skip_parquet: parquet_file = _get_dataset_parquet(self) self.parquet_file = None if parquet_file is None else str(parquet_file) diff --git a/openml/datasets/functions.py b/openml/datasets/functions.py index 3ac657ea0..432938520 100644 --- a/openml/datasets/functions.py +++ b/openml/datasets/functions.py @@ -19,9 +19,9 @@ import xmltodict from scipy.sparse import coo_matrix +import openml import openml._api_calls import openml.utils -from openml.config import OPENML_SKIP_PARQUET_ENV_VAR from openml.exceptions import ( OpenMLHashException, OpenMLPrivateDatasetError, @@ -492,7 +492,9 @@ def get_dataset( # noqa: C901, PLR0912 qualities_file = _get_dataset_qualities_file(did_cache_dir, dataset_id) parquet_file = None - skip_parquet = os.environ.get(OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" + skip_parquet = ( + os.environ.get(openml.config.OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" + ) download_parquet = "oml:parquet_url" in description and not skip_parquet if download_parquet and (download_data or download_all_files): try: diff --git a/openml/evaluations/evaluation.py b/openml/evaluations/evaluation.py index 5db087024..87df8454a 100644 --- a/openml/evaluations/evaluation.py +++ b/openml/evaluations/evaluation.py @@ -3,7 +3,6 @@ from dataclasses import asdict, dataclass -import openml.config import openml.datasets import openml.flows import openml.runs diff --git a/openml/runs/functions.py b/openml/runs/functions.py index 503788dbd..914a3b46b 100644 --- a/openml/runs/functions.py +++ b/openml/runs/functions.py @@ -18,7 +18,6 @@ import openml import openml._api_calls import openml.utils -from openml import config from openml.exceptions import ( OpenMLCacheException, OpenMLRunsExistError, @@ -45,7 +44,6 @@ # Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles if TYPE_CHECKING: - from openml.config import _Config from openml.extensions.extension_interface import Extension # get_dict is in run.py to avoid circular imports @@ -107,7 +105,7 @@ def run_model_on_task( # noqa: PLR0913 """ if avoid_duplicate_runs is None: avoid_duplicate_runs = openml.config.avoid_duplicate_runs - if avoid_duplicate_runs and not config.apikey: + if avoid_duplicate_runs and not openml.config.apikey: warnings.warn( "avoid_duplicate_runs is set to True, but no API key is set. " "Please set your API key in the OpenML configuration file, see" @@ -336,7 +334,7 @@ def run_flow_on_task( # noqa: C901, PLR0912, PLR0915, PLR0913 message = f"Executed Task {task.task_id} with Flow id:{run.flow_id}" else: message = f"Executed Task {task.task_id} on local Flow with name {flow.name}." - config.logger.info(message) + openml.config.logger.info(message) return run @@ -528,7 +526,7 @@ def _run_task_get_arffcontent( # noqa: PLR0915, PLR0912, C901 # The forked child process may not copy the configuration state of OpenML from the parent. # Current configuration setup needs to be copied and passed to the child processes. 
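
The two comment lines above explain why the parent's configuration is captured and handed to every joblib worker before being re-applied in the child. A minimal sketch of that pattern follows, assuming only that openml.config.get_config_as_dict() returns a plain dictionary and openml.config._setup() accepts it (both appear in this patch); the helper names used here are made up for illustration.

from __future__ import annotations

from joblib import Parallel, delayed

import openml


def _worker(configuration: dict, fold_no: int) -> int:
    # A forked/spawned worker does not necessarily inherit the parent's OpenML
    # configuration, so it is re-applied explicitly inside the child process.
    openml.config._setup(configuration)
    return fold_no  # stand-in for the real per-fold evaluation


def _run_folds_in_parallel(n_folds: int) -> list[int]:
    # Capture the configuration once in the parent ...
    configuration = openml.config.get_config_as_dict()
    # ... and pass it to every job so each child can restore it.
    return Parallel(n_jobs=2)(
        delayed(_worker)(configuration, fold_no) for fold_no in range(n_folds)
    )
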
- _config = config.get_config_as_dict() + _config = openml.config.get_config_as_dict() # Execute runs in parallel # assuming the same number of tasks as workers (n_jobs), the total compute time for this # statement will be similar to the slowest run @@ -551,7 +549,7 @@ def _run_task_get_arffcontent( # noqa: PLR0915, PLR0912, C901 rep_no=rep_no, sample_no=sample_no, task=task, - configuration=_config, + configuration=openml.config._Config, ) for _n_fit, rep_no, fold_no, sample_no in jobs ) # job_rvals contain the output of all the runs with one-to-one correspondence with `jobs` @@ -694,7 +692,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 rep_no: int, sample_no: int, task: OpenMLTask, - configuration: _Config | None = None, + configuration: openml.config._Config | None = None, # type: ignore[name-defined] ) -> tuple[ np.ndarray, pd.DataFrame | None, @@ -719,7 +717,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 Sample number to be run. task : OpenMLTask The task object from OpenML. - configuration : _Config + configuration : openml.config._Config Hyperparameters to configure the model. Returns @@ -733,7 +731,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 """ # Sets up the OpenML instantiated in the child process to match that of the parent's # if configuration=None, loads the default - config._setup(configuration) + openml.config._setup(configuration) train_indices, test_indices = task.get_train_test_split_indices( repeat=rep_no, @@ -762,7 +760,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 f"task_class={task.__class__.__name__}" ) - config.logger.info( + openml.config.logger.info( f"Going to run model {model!s} on " f"dataset {openml.datasets.get_dataset(task.dataset_id).name} " f"for repeat {rep_no} fold {fold_no} sample {sample_no}" diff --git a/openml/setups/functions.py b/openml/setups/functions.py index 4bf279ed1..a24d3a456 100644 --- a/openml/setups/functions.py +++ b/openml/setups/functions.py @@ -14,7 +14,6 @@ import openml import openml.exceptions import openml.utils -from openml import config from openml.flows import OpenMLFlow, flow_exists from .setup import OpenMLParameter, OpenMLSetup @@ -84,7 +83,7 @@ def _get_cached_setup(setup_id: int) -> OpenMLSetup: OpenMLCacheException If the setup file for the given setup ID is not cached. 
""" - cache_dir = Path(config.get_cache_directory()) + cache_dir = Path(openml.config.get_cache_directory()) setup_cache_dir = cache_dir / "setups" / str(setup_id) try: setup_file = setup_cache_dir / "description.xml" @@ -112,7 +111,7 @@ def get_setup(setup_id: int) -> OpenMLSetup: ------- OpenMLSetup (an initialized openml setup object) """ - setup_dir = Path(config.get_cache_directory()) / "setups" / str(setup_id) + setup_dir = Path(openml.config.get_cache_directory()) / "setups" / str(setup_id) setup_dir.mkdir(exist_ok=True, parents=True) setup_file = setup_dir / "description.xml" diff --git a/openml/setups/setup.py b/openml/setups/setup.py index 0960ad4c1..6c63b88ef 100644 --- a/openml/setups/setup.py +++ b/openml/setups/setup.py @@ -3,7 +3,6 @@ from typing import Any -import openml.config import openml.flows diff --git a/openml/study/functions.py b/openml/study/functions.py index bb24ddcff..367537773 100644 --- a/openml/study/functions.py +++ b/openml/study/functions.py @@ -8,8 +8,8 @@ import pandas as pd import xmltodict +import openml import openml._api_calls -import openml.config import openml.utils from openml.study.study import OpenMLBenchmarkSuite, OpenMLStudy diff --git a/openml/study/study.py b/openml/study/study.py index 7a9c80bbe..803c6455b 100644 --- a/openml/study/study.py +++ b/openml/study/study.py @@ -5,8 +5,8 @@ from collections.abc import Sequence from typing import Any +import openml from openml.base import OpenMLBase -from openml.config import get_server_base_url class BaseStudy(OpenMLBase): @@ -111,7 +111,7 @@ def _get_repr_body_fields(self) -> Sequence[tuple[str, str | int | list[str]]]: fields["ID"] = self.study_id fields["Study URL"] = self.openml_url if self.creator is not None: - fields["Creator"] = f"{get_server_base_url()}/u/{self.creator}" + fields["Creator"] = f"{openml.config.get_server_base_url()}/u/{self.creator}" if self.creation_date is not None: fields["Upload Time"] = self.creation_date.replace("T", " ") if self.data is not None: diff --git a/openml/tasks/task.py b/openml/tasks/task.py index b297a105c..202abac32 100644 --- a/openml/tasks/task.py +++ b/openml/tasks/task.py @@ -11,8 +11,8 @@ from typing import TYPE_CHECKING, Any from typing_extensions import TypedDict +import openml import openml._api_calls -import openml.config from openml import datasets from openml.base import OpenMLBase from openml.utils import _create_cache_directory_for_id diff --git a/openml/utils.py b/openml/utils.py index 3680bc0ff..daa86ab50 100644 --- a/openml/utils.py +++ b/openml/utils.py @@ -19,8 +19,6 @@ import openml._api_calls import openml.exceptions -from . import config - # Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles if TYPE_CHECKING: from openml.base import OpenMLBase @@ -329,7 +327,7 @@ def _list_all( # noqa: C901 def _get_cache_dir_for_key(key: str) -> Path: - return Path(config.get_cache_directory()) / key + return Path(openml.config.get_cache_directory()) / key def _create_cache_directory(key: str) -> Path: @@ -429,7 +427,7 @@ def safe_func(*args: P.args, **kwargs: P.kwargs) -> R: def _create_lockfiles_dir() -> Path: - path = Path(config.get_cache_directory()) / "locks" + path = Path(openml.config.get_cache_directory()) / "locks" # TODO(eddiebergman): Not sure why this is allowed to error and ignore??? 
with contextlib.suppress(OSError): path.mkdir(exist_ok=True, parents=True) diff --git a/tests/test_evaluations/test_evaluations_example.py b/tests/test_evaluations/test_evaluations_example.py index a9ad7e8c1..7ea25e55c 100644 --- a/tests/test_evaluations/test_evaluations_example.py +++ b/tests/test_evaluations/test_evaluations_example.py @@ -2,15 +2,14 @@ from __future__ import annotations import unittest - -from openml.config import overwrite_config_context +import openml class TestEvaluationsExample(unittest.TestCase): def test_example_python_paper(self): # Example script which will appear in the upcoming OpenML-Python paper # This test ensures that the example will keep running! - with overwrite_config_context( + with openml.config.overwrite_config_context( { "server": "https://www.openml.org/api/v1/xml", "apikey": None, diff --git a/tests/test_openml/test_api_calls.py b/tests/test_openml/test_api_calls.py index a295259ef..6b1cc64b1 100644 --- a/tests/test_openml/test_api_calls.py +++ b/tests/test_openml/test_api_calls.py @@ -9,7 +9,6 @@ import pytest import openml -from openml.config import ConfigurationForExamples import openml.testing from openml._api_calls import _download_minio_bucket, API_TOKEN_HELP_LINK diff --git a/tests/test_openml/test_config.py b/tests/test_openml/test_config.py index 7ef223504..bcb37dcec 100644 --- a/tests/test_openml/test_config.py +++ b/tests/test_openml/test_config.py @@ -12,7 +12,7 @@ import pytest -import openml.config +import openml import openml.testing from openml.testing import TestBase From f4aab6bc2191a94ed37aed2dea0e837630baba11 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:24:43 +0500 Subject: [PATCH 50/54] Revert "use LegacyConfig" This reverts commit fd43c489523c1a95e84bc2a95bf2caedd44262c2. --- openml/__init__.py | 7 +--- openml/_api_calls.py | 19 +++++----- openml/_legacy_config.py | 19 ---------- openml/base.py | 2 +- openml/cli.py | 14 ++++---- openml/{_config.py => config.py} | 36 ------------------- openml/datasets/dataset.py | 6 ++-- openml/datasets/functions.py | 6 ++-- openml/evaluations/evaluation.py | 1 + openml/runs/functions.py | 18 +++++----- openml/setups/functions.py | 5 +-- openml/setups/setup.py | 1 + openml/study/functions.py | 2 +- openml/study/study.py | 4 +-- openml/tasks/task.py | 2 +- openml/utils.py | 6 ++-- .../test_evaluations_example.py | 5 +-- tests/test_openml/test_api_calls.py | 1 + tests/test_openml/test_config.py | 2 +- 19 files changed, 50 insertions(+), 106 deletions(-) delete mode 100644 openml/_legacy_config.py rename openml/{_config.py => config.py} (95%) diff --git a/openml/__init__.py b/openml/__init__.py index 30f38f5f0..21dda24ad 100644 --- a/openml/__init__.py +++ b/openml/__init__.py @@ -20,8 +20,7 @@ from . import ( _api_calls, - _config, - _legacy_config, + config, datasets, evaluations, exceptions, @@ -51,8 +50,6 @@ OpenMLTask, ) -config = _legacy_config.LegacyConfig - def populate_cache( task_ids: list[int] | None = None, @@ -114,8 +111,6 @@ def populate_cache( "__version__", "_api_calls", "_backend", - "_config", - "_legacy_config", "config", "datasets", "evaluations", diff --git a/openml/_api_calls.py b/openml/_api_calls.py index 21d5c4391..9e53bd9fa 100644 --- a/openml/_api_calls.py +++ b/openml/_api_calls.py @@ -19,8 +19,7 @@ import xmltodict from urllib3 import ProxyManager -import openml - +from . 
import config from .__version__ import __version__ from .exceptions import ( OpenMLHashException, @@ -71,7 +70,7 @@ def resolve_env_proxies(url: str) -> str | None: def _create_url_from_endpoint(endpoint: str) -> str: - url: str = openml.config.server + url = config.server if not url.endswith("/"): url += "/" url += endpoint @@ -172,7 +171,7 @@ def _download_minio_file( bucket_name=bucket, object_name=object_name, file_path=str(destination), - progress=ProgressBar() if openml.config.show_progress else None, + progress=ProgressBar() if config.show_progress else None, request_headers=_HEADERS, ) if destination.is_file() and destination.suffix == ".zip": @@ -301,7 +300,7 @@ def _file_id_to_url(file_id: int, filename: str | None = None) -> str: Presents the URL how to download a given file id filename is optional """ - openml_url: str = openml.config.server.split("/api/") + openml_url = config.server.split("/api/") url = openml_url[0] + f"/data/download/{file_id!s}" if filename is not None: url += "/" + filename @@ -317,7 +316,7 @@ def _read_url_files( and sending file_elements as files """ data = {} if data is None else data - data["api_key"] = openml.config.apikey + data["api_key"] = config.apikey if file_elements is None: file_elements = {} # Using requests.post sets header 'Accept-encoding' automatically to @@ -337,8 +336,8 @@ def __read_url( md5_checksum: str | None = None, ) -> requests.Response: data = {} if data is None else data - if openml.config.apikey: - data["api_key"] = openml.config.apikey + if config.apikey: + data["api_key"] = config.apikey return _send_request( request_method=request_method, url=url, @@ -363,10 +362,10 @@ def _send_request( # noqa: C901, PLR0912 files: FILE_ELEMENTS_TYPE | None = None, md5_checksum: str | None = None, ) -> requests.Response: - n_retries = max(1, openml.config.connection_n_retries) + n_retries = max(1, config.connection_n_retries) response: requests.Response | None = None - delay_method = _human_delay if openml.config.retry_policy == "human" else _robot_delay + delay_method = _human_delay if config.retry_policy == "human" else _robot_delay # Error to raise in case of retrying too often. Will be set to the last observed exception. 
retry_raise_e: Exception | None = None diff --git a/openml/_legacy_config.py b/openml/_legacy_config.py deleted file mode 100644 index b26b13c01..000000000 --- a/openml/_legacy_config.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations - -from typing import Any - - -class LegacyConfigMeta(type): - def __getattr__(cls, name: str) -> Any: - import openml - - return getattr(openml._config, name) - - def __setattr__(cls, name: str, value: Any) -> None: - import openml - - setattr(openml._config, name, value) - - -class LegacyConfig(metaclass=LegacyConfigMeta): - pass diff --git a/openml/base.py b/openml/base.py index f79bc2931..a282be8eb 100644 --- a/openml/base.py +++ b/openml/base.py @@ -8,8 +8,8 @@ import xmltodict -import openml import openml._api_calls +import openml.config from .utils import _get_rest_api_type_alias, _tag_openml_base diff --git a/openml/cli.py b/openml/cli.py index 2120449e8..0afb089c2 100644 --- a/openml/cli.py +++ b/openml/cli.py @@ -9,7 +9,7 @@ from pathlib import Path from urllib.parse import urlparse -import openml +from openml import config from openml.__version__ import __version__ @@ -59,17 +59,17 @@ def wait_until_valid_input( def print_configuration() -> None: - file = openml.config.determine_config_file_path() + file = config.determine_config_file_path() header = f"File '{file}' contains (or defaults to):" print(header) - max_key_length = max(map(len, openml.config.get_config_as_dict())) - for field, value in openml.config.get_config_as_dict().items(): + max_key_length = max(map(len, config.get_config_as_dict())) + for field, value in config.get_config_as_dict().items(): print(f"{field.ljust(max_key_length)}: {value}") def verbose_set(field: str, value: str) -> None: - openml.config.set_field_in_config_file(field, value) + config.set_field_in_config_file(field, value) print(f"{field} set to '{value}'.") @@ -82,7 +82,7 @@ def check_apikey(apikey: str) -> str: return "" instructions = ( - f"Your current API key is set to: '{openml.config.apikey}'. " + f"Your current API key is set to: '{config.apikey}'. " "You can get an API key at https://new.openml.org. " "You must create an account if you don't have one yet:\n" " 1. 
Log in with the account.\n" @@ -347,7 +347,7 @@ def main() -> None: "'https://openml.github.io/openml-python/main/usage.html#configuration'.", ) - configurable_fields = [f for f in openml.config._defaults if f not in ["max_retries"]] + configurable_fields = [f for f in config._defaults if f not in ["max_retries"]] parser_configure.add_argument( "field", diff --git a/openml/_config.py b/openml/config.py similarity index 95% rename from openml/_config.py rename to openml/config.py index c266ae9d9..e6104fd7f 100644 --- a/openml/_config.py +++ b/openml/config.py @@ -18,8 +18,6 @@ from typing_extensions import TypedDict from urllib.parse import urlparse -from openml.enums import RetryPolicy - logger = logging.getLogger(__name__) openml_logger = logging.getLogger("openml") console_handler: logging.StreamHandler | None = None @@ -208,8 +206,6 @@ def set_retry_policy(value: Literal["human", "robot"], n_retries: int | None = N retry_policy = value connection_n_retries = default_retries_by_policy[value] if n_retries is None else n_retries - _sync_api_config() - class ConfigurationForExamples: """Allows easy switching to and from a test configuration, used for examples.""" @@ -248,8 +244,6 @@ def start_using_configuration_for_example(cls) -> None: stacklevel=2, ) - _sync_api_config() - @classmethod def stop_using_configuration_for_example(cls) -> None: """Return to configuration as it was before `start_use_example_configuration`.""" @@ -268,8 +262,6 @@ def stop_using_configuration_for_example(cls) -> None: apikey = cast("str", cls._last_used_key) cls._start_last_called = False - _sync_api_config() - def _handle_xdg_config_home_backwards_compatibility( xdg_home: str, @@ -382,8 +374,6 @@ def _setup(config: _Config | None = None) -> None: short_cache_dir = Path(config["cachedir"]) _root_cache_directory = short_cache_dir.expanduser().resolve() - _sync_api_config() - try: cache_exists = _root_cache_directory.exists() # create the cache subdirectory @@ -418,8 +408,6 @@ def set_field_in_config_file(field: str, value: Any) -> None: if value is not None: fh.write(f"{f} = {value}\n") - _sync_api_config() - def _parse_config(config_file: str | Path) -> _Config: """Parse the config file, set up defaults.""" @@ -507,8 +495,6 @@ def set_root_cache_directory(root_cache_directory: str | Path) -> None: global _root_cache_directory # noqa: PLW0603 _root_cache_directory = Path(root_cache_directory) - _sync_api_config() - start_using_configuration_for_example = ( ConfigurationForExamples.start_using_configuration_for_example @@ -528,28 +514,6 @@ def overwrite_config_context(config: dict[str, Any]) -> Iterator[_Config]: _setup(existing_config) -def _sync_api_config() -> None: - """Sync the new API config with the legacy config in this file.""" - from ._api import APIBackend - - p = urlparse(server) - v1_server = f"{p.scheme}://{p.netloc}/" - v1_base_url = p.path.lstrip("/") - connection_retry_policy = RetryPolicy.HUMAN if retry_policy == "human" else RetryPolicy.ROBOT - cache_dir = str(_root_cache_directory) - - APIBackend.set_config_values( - { - "api_configs.v1.server": v1_server, - "api_configs.v1.base_url": v1_base_url, - "api_configs.v1.api_key": apikey, - "cache.dir": cache_dir, - "connection.retry_policy": connection_retry_policy, - "connection.retries": connection_n_retries, - } - ) - - __all__ = [ "get_cache_directory", "get_config_as_dict", diff --git a/openml/datasets/dataset.py b/openml/datasets/dataset.py index 59d6205ba..d9eee278d 100644 --- a/openml/datasets/dataset.py +++ b/openml/datasets/dataset.py @@ 
-17,8 +17,8 @@ import scipy.sparse import xmltodict -import openml from openml.base import OpenMLBase +from openml.config import OPENML_SKIP_PARQUET_ENV_VAR from .data_feature import OpenMLDataFeature @@ -375,9 +375,7 @@ def _download_data(self) -> None: # import required here to avoid circular import. from .functions import _get_dataset_arff, _get_dataset_parquet - skip_parquet = ( - os.environ.get(openml.config.OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" - ) + skip_parquet = os.environ.get(OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" if self._parquet_url is not None and not skip_parquet: parquet_file = _get_dataset_parquet(self) self.parquet_file = None if parquet_file is None else str(parquet_file) diff --git a/openml/datasets/functions.py b/openml/datasets/functions.py index 432938520..3ac657ea0 100644 --- a/openml/datasets/functions.py +++ b/openml/datasets/functions.py @@ -19,9 +19,9 @@ import xmltodict from scipy.sparse import coo_matrix -import openml import openml._api_calls import openml.utils +from openml.config import OPENML_SKIP_PARQUET_ENV_VAR from openml.exceptions import ( OpenMLHashException, OpenMLPrivateDatasetError, @@ -492,9 +492,7 @@ def get_dataset( # noqa: C901, PLR0912 qualities_file = _get_dataset_qualities_file(did_cache_dir, dataset_id) parquet_file = None - skip_parquet = ( - os.environ.get(openml.config.OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" - ) + skip_parquet = os.environ.get(OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true" download_parquet = "oml:parquet_url" in description and not skip_parquet if download_parquet and (download_data or download_all_files): try: diff --git a/openml/evaluations/evaluation.py b/openml/evaluations/evaluation.py index 87df8454a..5db087024 100644 --- a/openml/evaluations/evaluation.py +++ b/openml/evaluations/evaluation.py @@ -3,6 +3,7 @@ from dataclasses import asdict, dataclass +import openml.config import openml.datasets import openml.flows import openml.runs diff --git a/openml/runs/functions.py b/openml/runs/functions.py index 914a3b46b..503788dbd 100644 --- a/openml/runs/functions.py +++ b/openml/runs/functions.py @@ -18,6 +18,7 @@ import openml import openml._api_calls import openml.utils +from openml import config from openml.exceptions import ( OpenMLCacheException, OpenMLRunsExistError, @@ -44,6 +45,7 @@ # Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles if TYPE_CHECKING: + from openml.config import _Config from openml.extensions.extension_interface import Extension # get_dict is in run.py to avoid circular imports @@ -105,7 +107,7 @@ def run_model_on_task( # noqa: PLR0913 """ if avoid_duplicate_runs is None: avoid_duplicate_runs = openml.config.avoid_duplicate_runs - if avoid_duplicate_runs and not openml.config.apikey: + if avoid_duplicate_runs and not config.apikey: warnings.warn( "avoid_duplicate_runs is set to True, but no API key is set. " "Please set your API key in the OpenML configuration file, see" @@ -334,7 +336,7 @@ def run_flow_on_task( # noqa: C901, PLR0912, PLR0915, PLR0913 message = f"Executed Task {task.task_id} with Flow id:{run.flow_id}" else: message = f"Executed Task {task.task_id} on local Flow with name {flow.name}." - openml.config.logger.info(message) + config.logger.info(message) return run @@ -526,7 +528,7 @@ def _run_task_get_arffcontent( # noqa: PLR0915, PLR0912, C901 # The forked child process may not copy the configuration state of OpenML from the parent. 
# Current configuration setup needs to be copied and passed to the child processes. - _config = openml.config.get_config_as_dict() + _config = config.get_config_as_dict() # Execute runs in parallel # assuming the same number of tasks as workers (n_jobs), the total compute time for this # statement will be similar to the slowest run @@ -549,7 +551,7 @@ def _run_task_get_arffcontent( # noqa: PLR0915, PLR0912, C901 rep_no=rep_no, sample_no=sample_no, task=task, - configuration=openml.config._Config, + configuration=_config, ) for _n_fit, rep_no, fold_no, sample_no in jobs ) # job_rvals contain the output of all the runs with one-to-one correspondence with `jobs` @@ -692,7 +694,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 rep_no: int, sample_no: int, task: OpenMLTask, - configuration: openml.config._Config | None = None, # type: ignore[name-defined] + configuration: _Config | None = None, ) -> tuple[ np.ndarray, pd.DataFrame | None, @@ -717,7 +719,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 Sample number to be run. task : OpenMLTask The task object from OpenML. - configuration : openml.config._Config + configuration : _Config Hyperparameters to configure the model. Returns @@ -731,7 +733,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 """ # Sets up the OpenML instantiated in the child process to match that of the parent's # if configuration=None, loads the default - openml.config._setup(configuration) + config._setup(configuration) train_indices, test_indices = task.get_train_test_split_indices( repeat=rep_no, @@ -760,7 +762,7 @@ def _run_task_get_arffcontent_parallel_helper( # noqa: PLR0913 f"task_class={task.__class__.__name__}" ) - openml.config.logger.info( + config.logger.info( f"Going to run model {model!s} on " f"dataset {openml.datasets.get_dataset(task.dataset_id).name} " f"for repeat {rep_no} fold {fold_no} sample {sample_no}" diff --git a/openml/setups/functions.py b/openml/setups/functions.py index a24d3a456..4bf279ed1 100644 --- a/openml/setups/functions.py +++ b/openml/setups/functions.py @@ -14,6 +14,7 @@ import openml import openml.exceptions import openml.utils +from openml import config from openml.flows import OpenMLFlow, flow_exists from .setup import OpenMLParameter, OpenMLSetup @@ -83,7 +84,7 @@ def _get_cached_setup(setup_id: int) -> OpenMLSetup: OpenMLCacheException If the setup file for the given setup ID is not cached. 
""" - cache_dir = Path(openml.config.get_cache_directory()) + cache_dir = Path(config.get_cache_directory()) setup_cache_dir = cache_dir / "setups" / str(setup_id) try: setup_file = setup_cache_dir / "description.xml" @@ -111,7 +112,7 @@ def get_setup(setup_id: int) -> OpenMLSetup: ------- OpenMLSetup (an initialized openml setup object) """ - setup_dir = Path(openml.config.get_cache_directory()) / "setups" / str(setup_id) + setup_dir = Path(config.get_cache_directory()) / "setups" / str(setup_id) setup_dir.mkdir(exist_ok=True, parents=True) setup_file = setup_dir / "description.xml" diff --git a/openml/setups/setup.py b/openml/setups/setup.py index 6c63b88ef..0960ad4c1 100644 --- a/openml/setups/setup.py +++ b/openml/setups/setup.py @@ -3,6 +3,7 @@ from typing import Any +import openml.config import openml.flows diff --git a/openml/study/functions.py b/openml/study/functions.py index 367537773..bb24ddcff 100644 --- a/openml/study/functions.py +++ b/openml/study/functions.py @@ -8,8 +8,8 @@ import pandas as pd import xmltodict -import openml import openml._api_calls +import openml.config import openml.utils from openml.study.study import OpenMLBenchmarkSuite, OpenMLStudy diff --git a/openml/study/study.py b/openml/study/study.py index 803c6455b..7a9c80bbe 100644 --- a/openml/study/study.py +++ b/openml/study/study.py @@ -5,8 +5,8 @@ from collections.abc import Sequence from typing import Any -import openml from openml.base import OpenMLBase +from openml.config import get_server_base_url class BaseStudy(OpenMLBase): @@ -111,7 +111,7 @@ def _get_repr_body_fields(self) -> Sequence[tuple[str, str | int | list[str]]]: fields["ID"] = self.study_id fields["Study URL"] = self.openml_url if self.creator is not None: - fields["Creator"] = f"{openml.config.get_server_base_url()}/u/{self.creator}" + fields["Creator"] = f"{get_server_base_url()}/u/{self.creator}" if self.creation_date is not None: fields["Upload Time"] = self.creation_date.replace("T", " ") if self.data is not None: diff --git a/openml/tasks/task.py b/openml/tasks/task.py index 202abac32..b297a105c 100644 --- a/openml/tasks/task.py +++ b/openml/tasks/task.py @@ -11,8 +11,8 @@ from typing import TYPE_CHECKING, Any from typing_extensions import TypedDict -import openml import openml._api_calls +import openml.config from openml import datasets from openml.base import OpenMLBase from openml.utils import _create_cache_directory_for_id diff --git a/openml/utils.py b/openml/utils.py index daa86ab50..3680bc0ff 100644 --- a/openml/utils.py +++ b/openml/utils.py @@ -19,6 +19,8 @@ import openml._api_calls import openml.exceptions +from . import config + # Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles if TYPE_CHECKING: from openml.base import OpenMLBase @@ -327,7 +329,7 @@ def _list_all( # noqa: C901 def _get_cache_dir_for_key(key: str) -> Path: - return Path(openml.config.get_cache_directory()) / key + return Path(config.get_cache_directory()) / key def _create_cache_directory(key: str) -> Path: @@ -427,7 +429,7 @@ def safe_func(*args: P.args, **kwargs: P.kwargs) -> R: def _create_lockfiles_dir() -> Path: - path = Path(openml.config.get_cache_directory()) / "locks" + path = Path(config.get_cache_directory()) / "locks" # TODO(eddiebergman): Not sure why this is allowed to error and ignore??? 
with contextlib.suppress(OSError): path.mkdir(exist_ok=True, parents=True) diff --git a/tests/test_evaluations/test_evaluations_example.py b/tests/test_evaluations/test_evaluations_example.py index 7ea25e55c..a9ad7e8c1 100644 --- a/tests/test_evaluations/test_evaluations_example.py +++ b/tests/test_evaluations/test_evaluations_example.py @@ -2,14 +2,15 @@ from __future__ import annotations import unittest -import openml + +from openml.config import overwrite_config_context class TestEvaluationsExample(unittest.TestCase): def test_example_python_paper(self): # Example script which will appear in the upcoming OpenML-Python paper # This test ensures that the example will keep running! - with openml.config.overwrite_config_context( + with overwrite_config_context( { "server": "https://www.openml.org/api/v1/xml", "apikey": None, diff --git a/tests/test_openml/test_api_calls.py b/tests/test_openml/test_api_calls.py index 6b1cc64b1..a295259ef 100644 --- a/tests/test_openml/test_api_calls.py +++ b/tests/test_openml/test_api_calls.py @@ -9,6 +9,7 @@ import pytest import openml +from openml.config import ConfigurationForExamples import openml.testing from openml._api_calls import _download_minio_bucket, API_TOKEN_HELP_LINK diff --git a/tests/test_openml/test_config.py b/tests/test_openml/test_config.py index bcb37dcec..7ef223504 100644 --- a/tests/test_openml/test_config.py +++ b/tests/test_openml/test_config.py @@ -12,7 +12,7 @@ import pytest -import openml +import openml.config import openml.testing from openml.testing import TestBase From d43cf86f3869392976d70fdbeba0d140ac1e04f3 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:35:57 +0500 Subject: [PATCH 51/54] implement _sync_api_config --- openml/config.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/openml/config.py b/openml/config.py index e6104fd7f..c266ae9d9 100644 --- a/openml/config.py +++ b/openml/config.py @@ -18,6 +18,8 @@ from typing_extensions import TypedDict from urllib.parse import urlparse +from openml.enums import RetryPolicy + logger = logging.getLogger(__name__) openml_logger = logging.getLogger("openml") console_handler: logging.StreamHandler | None = None @@ -206,6 +208,8 @@ def set_retry_policy(value: Literal["human", "robot"], n_retries: int | None = N retry_policy = value connection_n_retries = default_retries_by_policy[value] if n_retries is None else n_retries + _sync_api_config() + class ConfigurationForExamples: """Allows easy switching to and from a test configuration, used for examples.""" @@ -244,6 +248,8 @@ def start_using_configuration_for_example(cls) -> None: stacklevel=2, ) + _sync_api_config() + @classmethod def stop_using_configuration_for_example(cls) -> None: """Return to configuration as it was before `start_use_example_configuration`.""" @@ -262,6 +268,8 @@ def stop_using_configuration_for_example(cls) -> None: apikey = cast("str", cls._last_used_key) cls._start_last_called = False + _sync_api_config() + def _handle_xdg_config_home_backwards_compatibility( xdg_home: str, @@ -374,6 +382,8 @@ def _setup(config: _Config | None = None) -> None: short_cache_dir = Path(config["cachedir"]) _root_cache_directory = short_cache_dir.expanduser().resolve() + _sync_api_config() + try: cache_exists = _root_cache_directory.exists() # create the cache subdirectory @@ -408,6 +418,8 @@ def set_field_in_config_file(field: str, value: Any) -> None: if value is not None: fh.write(f"{f} = {value}\n") + _sync_api_config() + def _parse_config(config_file: str | Path) -> 
_Config: """Parse the config file, set up defaults.""" @@ -495,6 +507,8 @@ def set_root_cache_directory(root_cache_directory: str | Path) -> None: global _root_cache_directory # noqa: PLW0603 _root_cache_directory = Path(root_cache_directory) + _sync_api_config() + start_using_configuration_for_example = ( ConfigurationForExamples.start_using_configuration_for_example @@ -514,6 +528,28 @@ def overwrite_config_context(config: dict[str, Any]) -> Iterator[_Config]: _setup(existing_config) +def _sync_api_config() -> None: + """Sync the new API config with the legacy config in this file.""" + from ._api import APIBackend + + p = urlparse(server) + v1_server = f"{p.scheme}://{p.netloc}/" + v1_base_url = p.path.lstrip("/") + connection_retry_policy = RetryPolicy.HUMAN if retry_policy == "human" else RetryPolicy.ROBOT + cache_dir = str(_root_cache_directory) + + APIBackend.set_config_values( + { + "api_configs.v1.server": v1_server, + "api_configs.v1.base_url": v1_base_url, + "api_configs.v1.api_key": apikey, + "cache.dir": cache_dir, + "connection.retry_policy": connection_retry_policy, + "connection.retries": connection_n_retries, + } + ) + + __all__ = [ "get_cache_directory", "get_config_as_dict", From 3e323edff1787e01f8f9aa74e419f3f27fc9400b Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:36:18 +0500 Subject: [PATCH 52/54] update tests with _sync_api_config --- openml/testing.py | 3 +++ tests/conftest.py | 3 +++ tests/test_datasets/test_dataset_functions.py | 6 ++++++ 3 files changed, 12 insertions(+) diff --git a/openml/testing.py b/openml/testing.py index a971aa1c3..a3d137916 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -110,6 +110,7 @@ def setUp(self, n_levels: int = 1, tmpdir_suffix: str = "") -> None: self.retry_policy = openml.config.retry_policy self.connection_n_retries = openml.config.connection_n_retries openml.config.set_retry_policy("robot", n_retries=20) + openml.config._sync_api_config() def use_production_server(self) -> None: """ @@ -119,6 +120,7 @@ def use_production_server(self) -> None: """ openml.config.server = self.production_server openml.config.apikey = "" + openml.config._sync_api_config() def tearDown(self) -> None: """Tear down the test""" @@ -132,6 +134,7 @@ def tearDown(self) -> None: openml.config.connection_n_retries = self.connection_n_retries openml.config.retry_policy = self.retry_policy + openml.config._sync_api_config() @classmethod def _mark_entity_for_removal( diff --git a/tests/conftest.py b/tests/conftest.py index bd974f3f3..bcf93bd72 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -99,6 +99,7 @@ def delete_remote_files(tracker, flow_names) -> None: """ openml.config.server = TestBase.test_server openml.config.apikey = TestBase.user_key + openml.config._sync_api_config() # reordering to delete sub flows at the end of flows # sub-flows have shorter names, hence, sorting by descending order of flow name length @@ -275,10 +276,12 @@ def with_server(request): if "production" in request.keywords: openml.config.server = "https://www.openml.org/api/v1/xml" openml.config.apikey = None + openml.config._sync_api_config() yield return openml.config.server = "https://test.openml.org/api/v1/xml" openml.config.apikey = TestBase.user_key + openml.config._sync_api_config() yield diff --git a/tests/test_datasets/test_dataset_functions.py b/tests/test_datasets/test_dataset_functions.py index c41664ba7..39a6c9cae 100644 --- a/tests/test_datasets/test_dataset_functions.py +++ b/tests/test_datasets/test_dataset_functions.py @@ -158,6 
+158,7 @@ def test_check_datasets_active(self): [79], ) openml.config.server = self.test_server + openml.config._sync_api_config() @pytest.mark.uses_test_server() def test_illegal_character_tag(self): @@ -186,6 +187,7 @@ def test__name_to_id_with_deactivated(self): # /d/1 was deactivated assert openml.datasets.functions._name_to_id("anneal") == 2 openml.config.server = self.test_server + openml.config._sync_api_config() @pytest.mark.production() def test__name_to_id_with_multiple_active(self): @@ -438,6 +440,7 @@ def test__getarff_md5_issue(self): } n = openml.config.connection_n_retries openml.config.connection_n_retries = 1 + openml.config._sync_api_config() self.assertRaisesRegex( OpenMLHashException, @@ -448,6 +451,7 @@ def test__getarff_md5_issue(self): ) openml.config.connection_n_retries = n + openml.config._sync_api_config() @pytest.mark.uses_test_server() def test__get_dataset_features(self): @@ -617,6 +621,7 @@ def test_data_status(self): # admin key for test server (only admins can activate datasets. # all users can deactivate their own datasets) openml.config.apikey = TestBase.admin_key + openml.config._sync_api_config() openml.datasets.status_update(did, "active") self._assert_status_of_dataset(did=did, status="active") @@ -1555,6 +1560,7 @@ def test_list_datasets_with_high_size_parameter(self): # Reverting to test server openml.config.server = self.test_server + openml.config._sync_api_config() assert len(datasets_a) == len(datasets_b) From 9195fa6ea6de253141fe68e922fd414c85b1d806 Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:51:44 +0500 Subject: [PATCH 53/54] rename config: timeout -> timeout_seconds --- openml/_api/clients/http.py | 6 +++--- openml/_api/setup/builder.py | 4 ++-- openml/_api/setup/config.py | 4 ++-- openml/testing.py | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/openml/_api/clients/http.py b/openml/_api/clients/http.py index 353cd5e9e..2c1e52d19 100644 --- a/openml/_api/clients/http.py +++ b/openml/_api/clients/http.py @@ -116,7 +116,7 @@ def __init__( # noqa: PLR0913 server: str, base_url: str, api_key: str, - timeout: int, + timeout_seconds: int, retries: int, retry_policy: RetryPolicy, cache: HTTPCache | None = None, @@ -124,7 +124,7 @@ def __init__( # noqa: PLR0913 self.server = server self.base_url = base_url self.api_key = api_key - self.timeout = timeout + self.timeout_seconds = timeout_seconds self.retries = retries self.retry_policy = retry_policy self.cache = cache @@ -343,7 +343,7 @@ def request( headers = request_kwargs.pop("headers", {}).copy() headers.update(self.headers) - timeout = request_kwargs.pop("timeout", self.timeout) + timeout = request_kwargs.pop("timeout", self.timeout_seconds) files = request_kwargs.pop("files", None) if use_cache and not reset_cache and self.cache is not None: diff --git a/openml/_api/setup/builder.py b/openml/_api/setup/builder.py index 750db431a..d411189ee 100644 --- a/openml/_api/setup/builder.py +++ b/openml/_api/setup/builder.py @@ -33,7 +33,7 @@ def build(cls, config: Config) -> APIBackendBuilder: server=primary_api_config.server, base_url=primary_api_config.base_url, api_key=primary_api_config.api_key, - timeout=config.connection.timeout, + timeout_seconds=config.connection.timeout_seconds, retries=config.connection.retries, retry_policy=config.connection.retry_policy, cache=http_cache, @@ -51,7 +51,7 @@ def build(cls, config: Config) -> APIBackendBuilder: server=fallback_api_config.server, base_url=fallback_api_config.base_url, 
api_key=fallback_api_config.api_key, - timeout=config.connection.timeout, + timeout_seconds=config.connection.timeout_seconds, retries=config.connection.retries, retry_policy=config.connection.retry_policy, cache=http_cache, diff --git a/openml/_api/setup/config.py b/openml/_api/setup/config.py index ea868262a..8e8fc1f5d 100644 --- a/openml/_api/setup/config.py +++ b/openml/_api/setup/config.py @@ -18,7 +18,7 @@ class APIConfig: class ConnectionConfig: retries: int retry_policy: RetryPolicy - timeout: int + timeout_seconds: int @dataclass @@ -51,7 +51,7 @@ class Config: default_factory=lambda: ConnectionConfig( retries=5, retry_policy=RetryPolicy.HUMAN, - timeout=10, + timeout_seconds=10, ) ) diff --git a/openml/testing.py b/openml/testing.py index a3d137916..2087283d3 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -286,7 +286,7 @@ class TestAPIBase(unittest.TestCase): server: str base_url: str api_key: str - timeout: int + timeout_seconds: int retries: int retry_policy: RetryPolicy dir: str @@ -298,7 +298,7 @@ def setUp(self) -> None: self.server = "https://test.openml.org/" self.base_url = "api/v1/xml" self.api_key = "normaluser" - self.timeout = 10 + self.timeout_seconds = 10 self.retries = 3 self.retry_policy = RetryPolicy.HUMAN self.dir = "test_cache" @@ -312,7 +312,7 @@ def setUp(self) -> None: server=self.server, base_url=self.base_url, api_key=self.api_key, - timeout=self.timeout, + timeout_seconds=self.timeout_seconds, retries=self.retries, retry_policy=self.retry_policy, cache=self.cache, @@ -340,7 +340,7 @@ def _get_http_client( # noqa: PLR0913 server: str, base_url: str, api_key: str, - timeout: int, + timeout_seconds: int, retries: int, retry_policy: RetryPolicy, cache: HTTPCache | None = None, @@ -349,7 +349,7 @@ def _get_http_client( # noqa: PLR0913 server=server, base_url=base_url, api_key=api_key, - timeout=timeout, + timeout_seconds=timeout_seconds, retries=retries, retry_policy=retry_policy, cache=cache, From 5342eec3716e1c50ee020156702bb658d7e37cba Mon Sep 17 00:00:00 2001 From: geetu040 Date: Tue, 3 Feb 2026 12:57:07 +0500 Subject: [PATCH 54/54] use timedelta for default ttl value --- openml/_api/setup/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/openml/_api/setup/config.py b/openml/_api/setup/config.py index 8e8fc1f5d..9b87ffbaf 100644 --- a/openml/_api/setup/config.py +++ b/openml/_api/setup/config.py @@ -1,6 +1,7 @@ from __future__ import annotations from dataclasses import dataclass, field +from datetime import timedelta from openml.enums import APIVersion, RetryPolicy @@ -58,6 +59,6 @@ class Config: cache: CacheConfig = field( default_factory=lambda: CacheConfig( dir=str(_resolve_default_cache_dir()), - ttl=60 * 60 * 24 * 7, + ttl=int(timedelta(weeks=1).total_seconds()), ) )
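
As a quick sanity check on the final commit above: the timedelta-based default is equivalent to the hand-computed constant it replaces, so the change only improves readability. A minimal, self-contained check (plain Python, illustrative only and not part of the patch series itself):

    from datetime import timedelta

    # Old default from openml/_api/setup/config.py: one week in seconds.
    old_default = 60 * 60 * 24 * 7

    # New default introduced in PATCH 54/54.
    new_default = int(timedelta(weeks=1).total_seconds())

    # Both expressions resolve to the same TTL value.
    assert old_default == new_default == 604800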