-
-
Notifications
You must be signed in to change notification settings - Fork 270
Expand file tree
/
Copy pathtest_utils.py
More file actions
181 lines (136 loc) · 6.31 KB
/
test_utils.py
File metadata and controls
181 lines (136 loc) · 6.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
from __future__ import annotations
import os
import unittest.mock
import pytest
import openml
from openml.testing import _check_dataset
@pytest.fixture()
def min_number_tasks_on_test_server() -> int:
    """Lower bound on the number of tasks present on the test server after a reset."""
    expected_minimum = 1068
    return expected_minimum
@pytest.fixture()
def min_number_datasets_on_test_server() -> int:
    """Lower bound on the number of datasets present on the test server after a reset."""
    expected_minimum = 127
    return expected_minimum
@pytest.fixture()
def min_number_flows_on_test_server() -> int:
    # NOTE(review): docstring previously claimed 127 flows, contradicting the
    # returned value; aligned to the value actually used by the tests.
    """After a reset at least 15 flows are on the test server"""
    return 15
@pytest.fixture()
def min_number_setups_on_test_server() -> int:
    # NOTE(review): docstring previously claimed 20 setups, contradicting the
    # returned value; aligned to the value actually used by the tests.
    """After a reset at least 50 setups are on the test server"""
    return 50
@pytest.fixture()
def min_number_runs_on_test_server() -> int:
    # NOTE(review): docstring previously claimed 21 runs, contradicting the
    # returned value; aligned to the value actually used by the tests.
    """After a reset at least 15 runs are on the test server"""
    return 15
@pytest.fixture()
def min_number_evaluations_on_test_server() -> int:
    """Lower bound on the number of evaluations present on the test server after a reset."""
    expected_minimum = 8
    return expected_minimum
def _mocked_perform_api_call(call, request_method):
    """Stand-in for ``openml._api_calls._perform_api_call`` that downloads directly.

    ``request_method`` is accepted only so the signature matches the patched
    function; it is not used because the response is fetched as plain text.
    """
    full_url = openml.config.server + call
    return openml._api_calls._download_text_file(full_url)
@pytest.mark.test_server()
def test_list_all():
    """Smoke test: ``_list_all`` completes without raising against the server."""
    openml.utils._list_all(listing_call=openml.tasks.functions._list_tasks)
@pytest.mark.test_server()
def test_list_all_for_tasks(min_number_tasks_on_test_server):
    """Requesting exactly ``size`` tasks should yield exactly that many."""
    listed = openml.tasks.list_tasks(size=min_number_tasks_on_test_server)
    assert len(listed) == min_number_tasks_on_test_server
@pytest.mark.test_server()
def test_list_all_with_multiple_batches(min_number_tasks_on_test_server):
    """Listing paginates: a batch size below the total forces at least two batches."""
    # One below the known minimum guarantees a second round trip while
    # keeping the number of requests as small as possible.
    per_batch = min_number_tasks_on_test_server - 1
    batches = openml.utils._list_all(
        listing_call=openml.tasks.functions._list_tasks,
        batch_size=per_batch,
    )
    assert len(batches) >= 2
    total_listed = sum(len(batch) for batch in batches)
    assert total_listed >= min_number_tasks_on_test_server
@pytest.mark.test_server()
def test_list_all_for_datasets(min_number_datasets_on_test_server):
    """A sized dataset listing returns exactly ``size`` rows, each passing sanity checks."""
    listed = openml.datasets.list_datasets(
        size=min_number_datasets_on_test_server,
    )
    assert len(listed) == min_number_datasets_on_test_server
    for record in listed.to_dict(orient="index").values():
        _check_dataset(record)
@pytest.mark.test_server()
def test_list_all_for_flows(min_number_flows_on_test_server):
    """Requesting exactly ``size`` flows should yield exactly that many."""
    listed = openml.flows.list_flows(size=min_number_flows_on_test_server)
    assert len(listed) == min_number_flows_on_test_server
@pytest.mark.flaky()  # Other tests might need to upload runs first
@pytest.mark.test_server()
def test_list_all_for_setups(min_number_setups_on_test_server):
    """Requesting exactly ``size`` setups should yield exactly that many."""
    # TODO apparently list_setups function does not support kwargs
    listed = openml.setups.list_setups(size=min_number_setups_on_test_server)
    assert len(listed) == min_number_setups_on_test_server
@pytest.mark.flaky()  # Other tests might need to upload runs first
@pytest.mark.test_server()
def test_list_all_for_runs(min_number_runs_on_test_server):
    """Requesting exactly ``size`` runs should yield exactly that many."""
    listed = openml.runs.list_runs(size=min_number_runs_on_test_server)
    assert len(listed) == min_number_runs_on_test_server
@pytest.mark.flaky()  # Other tests might need to upload runs first
@pytest.mark.test_server()
def test_list_all_for_evaluations(min_number_evaluations_on_test_server):
    """Requesting exactly ``size`` evaluations should yield exactly that many."""
    # TODO apparently list_evaluations function does not support kwargs
    listed = openml.evaluations.list_evaluations(
        function="predictive_accuracy",
        size=min_number_evaluations_on_test_server,
    )
    assert len(listed) == min_number_evaluations_on_test_server
@unittest.mock.patch("openml._api_calls._perform_api_call", side_effect=_mocked_perform_api_call)
@pytest.mark.test_server()
def test_list_all_few_results_available(_perform_api_call):
    """When fewer results exist than ``size`` requests, a single API call suffices."""
    listed = openml.datasets.list_datasets(size=1000, data_name="iris", data_version=1)
    assert len(listed) == 1, "only one iris dataset version 1 should be present"
    assert _perform_api_call.call_count == 1, "expect just one call to get one dataset"
@unittest.skipIf(os.name == "nt", "https://github.com/openml/openml-python/issues/1033")
@unittest.mock.patch("openml.config.get_cache_directory")
def test__create_cache_directory(config_mock, tmp_path):
    """_create_cache_directory creates a subdirectory under the configured
    cache root, and raises OpenMLCacheException when the root is not writable.

    Skipped on Windows, where chmod-based read-only directories do not work
    the same way (see the linked issue).
    """
    # Happy path: the requested subdirectory is created under the cache root.
    config_mock.return_value = tmp_path
    openml.utils._create_cache_directory("abc")
    assert (tmp_path / "abc").exists()
    # Failure path: point the cache root at a read-only directory so that
    # creating a subdirectory inside it must fail.
    subdir = tmp_path / "def"
    subdir.mkdir()
    subdir.chmod(0o444)  # read-only for everyone
    config_mock.return_value = subdir
    with pytest.raises(
        openml.exceptions.OpenMLCacheException,
        match="Cannot create cache directory",
    ):
        openml.utils._create_cache_directory("ghi")
@pytest.mark.test_server()
def test_correct_test_server_download_state():
    """Verify that the test server serves data consistent with its own task metadata.

    A failure here most likely means the test server is misconfigured and is
    serving data belonging to the production-server task with the same ID —
    i.e. parquet files wrongly associated with the test server's task.
    """
    fetched_dataset = openml.tasks.get_task(119).get_dataset()
    column_count = fetched_dataset.get_data()[0].shape[1]
    assert column_count == len(fetched_dataset.features)
@unittest.mock.patch("openml.config.get_cache_directory")
def test_get_cache_size(config_mock, tmp_path):
    """``get_cache_size`` reports 0 for an empty cache and the total byte size
    of cached files — including files in nested directories — once present.

    Parameters
    ----------
    config_mock : unittest.mock.Mock
        Redirects the configured cache directory to ``tmp_path`` so the test
        runs against an isolated location.
    tmp_path : pathlib.Path
        A pytest-provided temporary directory used as the cache root.
    """
    config_mock.return_value = tmp_path
    # Freshly created directory: nothing cached yet.
    assert openml.utils.get_cache_size() == 0
    # A 100-byte file nested one level down must be counted.
    nested_dir = tmp_path / "subdir"
    nested_dir.mkdir()
    (nested_dir / "nested_file.txt").write_bytes(b"b" * 100)
    assert openml.utils.get_cache_size() == 100