
Commit 4b1bdf4

Do not use the test API key for unit tests that talk to production (#1436)

* Do not use the test api_key for production calls inside the unit tests
* Pre-commit checks
1 parent eed10f7 commit 4b1bdf4

10 files changed (+63, -52 lines)


openml/testing.py

Lines changed: 9 additions & 0 deletions
@@ -108,6 +108,15 @@ def setUp(self, n_levels: int = 1, tmpdir_suffix: str = "") -> None:
         self.connection_n_retries = openml.config.connection_n_retries
         openml.config.set_retry_policy("robot", n_retries=20)
 
+    def use_production_server(self) -> None:
+        """
+        Use the production server for the OpenML API calls.
+
+        Please use this sparingly - it is better to use the test server.
+        """
+        openml.config.server = self.production_server
+        openml.config.apikey = ""
+
     def tearDown(self) -> None:
         """Tear down the test"""
         os.chdir(self.cwd)
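The helper matters because the test harness (TestBase) configures openml.config with the test server and its API key; tests that previously only reassigned openml.config.server kept sending that test key along with production requests. Below is a minimal usage sketch of the new pattern, not taken from the commit itself: the class and test names are hypothetical, while use_production_server() and the production marker come from the diff.

import openml
import pytest

from openml.testing import TestBase


class ExampleProductionTest(TestBase):  # hypothetical class name, for illustration only
    @pytest.mark.production()  # marker used in this test suite for tests that hit the live server
    def test_smoke_list_datasets(self):
        # Old pattern: only the URL changed, so the test-server API key configured
        # by the test harness was still attached to production calls:
        #     openml.config.server = self.production_server
        # New pattern: switch the server *and* clear the API key:
        self.use_production_server()
        datasets = openml.datasets.list_datasets(size=5)
        assert len(datasets) > 0

The remaining nine files below adopt this pattern by replacing the inline server assignment with a call to the helper.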

tests/test_datasets/test_dataset.py

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ class OpenMLDatasetTest(TestBase):
 
     def setUp(self):
         super().setUp()
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         # Load dataset id 2 - dataset 2 is interesting because it contains
         # missing values, categorical features etc.
@@ -344,7 +344,7 @@ class OpenMLDatasetTestSparse(TestBase):
 
     def setUp(self):
         super().setUp()
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         self.sparse_dataset = openml.datasets.get_dataset(4136, download_data=False)

tests/test_datasets/test_dataset_functions.py

Lines changed: 8 additions & 8 deletions
@@ -139,7 +139,7 @@ def test_list_datasets_empty(self):
     @pytest.mark.production()
     def test_check_datasets_active(self):
         # Have to test on live because there is no deactivated dataset on the test server.
-        openml.config.server = self.production_server
+        self.use_production_server()
         active = openml.datasets.check_datasets_active(
             [2, 17, 79],
             raise_error_if_not_exist=False,
@@ -176,27 +176,27 @@ def test_illegal_length_tag(self):
     @pytest.mark.production()
     def test__name_to_id_with_deactivated(self):
         """Check that an activated dataset is returned if an earlier deactivated one exists."""
-        openml.config.server = self.production_server
+        self.use_production_server()
         # /d/1 was deactivated
         assert openml.datasets.functions._name_to_id("anneal") == 2
         openml.config.server = self.test_server
 
     @pytest.mark.production()
     def test__name_to_id_with_multiple_active(self):
         """With multiple active datasets, retrieve the least recent active."""
-        openml.config.server = self.production_server
+        self.use_production_server()
         assert openml.datasets.functions._name_to_id("iris") == 61
 
     @pytest.mark.production()
     def test__name_to_id_with_version(self):
         """With multiple active datasets, retrieve the least recent active."""
-        openml.config.server = self.production_server
+        self.use_production_server()
         assert openml.datasets.functions._name_to_id("iris", version=3) == 969
 
     @pytest.mark.production()
     def test__name_to_id_with_multiple_active_error(self):
         """With multiple active datasets, retrieve the least recent active."""
-        openml.config.server = self.production_server
+        self.use_production_server()
         self.assertRaisesRegex(
             ValueError,
             "Multiple active datasets exist with name 'iris'.",
@@ -272,12 +272,12 @@ def test_get_dataset_uint8_dtype(self):
     @pytest.mark.production()
     def test_get_dataset_cannot_access_private_data(self):
         # Issue324 Properly handle private datasets when trying to access them
-        openml.config.server = self.production_server
+        self.use_production_server()
         self.assertRaises(OpenMLPrivateDatasetError, openml.datasets.get_dataset, 45)
 
     @pytest.mark.skip("Need to find dataset name of private dataset")
     def test_dataset_by_name_cannot_access_private_data(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         self.assertRaises(OpenMLPrivateDatasetError, openml.datasets.get_dataset, "NAME_GOES_HERE")
 
     def test_get_dataset_lazy_all_functions(self):
@@ -1501,7 +1501,7 @@ def test_data_fork(self):
     @pytest.mark.production()
     def test_list_datasets_with_high_size_parameter(self):
         # Testing on prod since concurrent deletion of uploded datasets make the test fail
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         datasets_a = openml.datasets.list_datasets()
         datasets_b = openml.datasets.list_datasets(size=np.inf)

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 10 additions & 10 deletions
@@ -52,7 +52,7 @@ def _check_list_evaluation_setups(self, **kwargs):
 
     @pytest.mark.production()
     def test_evaluation_list_filter_task(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         task_id = 7312
 
@@ -72,7 +72,7 @@ def test_evaluation_list_filter_task(self):
 
     @pytest.mark.production()
     def test_evaluation_list_filter_uploader_ID_16(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         uploader_id = 16
         evaluations = openml.evaluations.list_evaluations(
@@ -87,7 +87,7 @@ def test_evaluation_list_filter_uploader_ID_16(self):
 
     @pytest.mark.production()
     def test_evaluation_list_filter_uploader_ID_10(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         setup_id = 10
         evaluations = openml.evaluations.list_evaluations(
@@ -106,7 +106,7 @@ def test_evaluation_list_filter_uploader_ID_10(self):
 
     @pytest.mark.production()
     def test_evaluation_list_filter_flow(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         flow_id = 100
 
@@ -126,7 +126,7 @@ def test_evaluation_list_filter_flow(self):
 
     @pytest.mark.production()
     def test_evaluation_list_filter_run(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         run_id = 12
 
@@ -146,7 +146,7 @@ def test_evaluation_list_filter_run(self):
 
     @pytest.mark.production()
     def test_evaluation_list_limit(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         evaluations = openml.evaluations.list_evaluations(
             "predictive_accuracy",
@@ -164,7 +164,7 @@ def test_list_evaluations_empty(self):
 
     @pytest.mark.production()
     def test_evaluation_list_per_fold(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         size = 1000
         task_ids = [6]
         uploader_ids = [1]
@@ -202,7 +202,7 @@ def test_evaluation_list_per_fold(self):
 
     @pytest.mark.production()
     def test_evaluation_list_sort(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         size = 10
         task_id = 6
         # Get all evaluations of the task
@@ -239,7 +239,7 @@ def test_list_evaluation_measures(self):
 
     @pytest.mark.production()
     def test_list_evaluations_setups_filter_flow(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow_id = [405]
         size = 100
         evals = self._check_list_evaluation_setups(flows=flow_id, size=size)
@@ -257,7 +257,7 @@ def test_list_evaluations_setups_filter_flow(self):
 
     @pytest.mark.production()
     def test_list_evaluations_setups_filter_task(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         task_id = [6]
         size = 121
         self._check_list_evaluation_setups(tasks=task_id, size=size)

tests/test_flows/test_flow.py

Lines changed: 3 additions & 3 deletions
@@ -48,7 +48,7 @@ def tearDown(self):
     def test_get_flow(self):
         # We need to use the production server here because 4024 is not the
         # test server
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         flow = openml.flows.get_flow(4024)
         assert isinstance(flow, openml.OpenMLFlow)
@@ -82,7 +82,7 @@ def test_get_structure(self):
         # also responsible for testing: flow.get_subflow
         # We need to use the production server here because 4024 is not the
         # test server
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         flow = openml.flows.get_flow(4024)
         flow_structure_name = flow.get_structure("name")
@@ -558,7 +558,7 @@ def test_extract_tags(self):
 
     @pytest.mark.production()
     def test_download_non_scikit_learn_flows(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         flow = openml.flows.get_flow(6742)
         assert isinstance(flow, openml.OpenMLFlow)

tests/test_flows/test_flow_functions.py

Lines changed: 10 additions & 9 deletions
@@ -48,7 +48,7 @@ def _check_flow(self, flow):
 
     @pytest.mark.production()
     def test_list_flows(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         # We can only perform a smoke test here because we test on dynamic
         # data from the internet...
         flows = openml.flows.list_flows()
@@ -59,7 +59,7 @@ def test_list_flows(self):
 
     @pytest.mark.production()
     def test_list_flows_output_format(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         # We can only perform a smoke test here because we test on dynamic
         # data from the internet...
         flows = openml.flows.list_flows()
@@ -68,21 +68,22 @@ def test_list_flows_output_format(self):
 
     @pytest.mark.production()
     def test_list_flows_empty(self):
+        self.use_production_server()
         openml.config.server = self.production_server
         flows = openml.flows.list_flows(tag="NoOneEverUsesThisTag123")
         assert flows.empty
 
     @pytest.mark.production()
     def test_list_flows_by_tag(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flows = openml.flows.list_flows(tag="weka")
         assert len(flows) >= 5
         for flow in flows.to_dict(orient="index").values():
             self._check_flow(flow)
 
     @pytest.mark.production()
     def test_list_flows_paginate(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         size = 10
         maximum = 100
         for i in range(0, maximum, size):
@@ -302,7 +303,7 @@ def test_sklearn_to_flow_list_of_lists(self):
     def test_get_flow1(self):
         # Regression test for issue #305
        # Basically, this checks that a flow without an external version can be loaded
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow = openml.flows.get_flow(1)
         assert flow.external_version is None
 
@@ -335,7 +336,7 @@ def test_get_flow_reinstantiate_model_no_extension(self):
     )
     @pytest.mark.production()
     def test_get_flow_with_reinstantiate_strict_with_wrong_version_raises_exception(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow = 8175
         expected = "Trying to deserialize a model with dependency sklearn==0.19.1 not satisfied."
         self.assertRaisesRegex(
@@ -356,7 +357,7 @@ def test_get_flow_with_reinstantiate_strict_with_wrong_version_raises_exception(
     )
     @pytest.mark.production()
     def test_get_flow_reinstantiate_flow_not_strict_post_1(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow = openml.flows.get_flow(flow_id=19190, reinstantiate=True, strict_version=False)
         assert flow.flow_id is None
         assert "sklearn==1.0.0" not in flow.dependencies
@@ -370,7 +371,7 @@ def test_get_flow_reinstantiate_flow_not_strict_post_1(self):
     )
     @pytest.mark.production()
     def test_get_flow_reinstantiate_flow_not_strict_023_and_024(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow = openml.flows.get_flow(flow_id=18587, reinstantiate=True, strict_version=False)
         assert flow.flow_id is None
         assert "sklearn==0.23.1" not in flow.dependencies
@@ -382,7 +383,7 @@ def test_get_flow_reinstantiate_flow_not_strict_023_and_024(self):
     )
     @pytest.mark.production()
     def test_get_flow_reinstantiate_flow_not_strict_pre_023(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow = openml.flows.get_flow(flow_id=8175, reinstantiate=True, strict_version=False)
         assert flow.flow_id is None
         assert "sklearn==0.19.1" not in flow.dependencies

tests/test_runs/test_run_functions.py

Lines changed: 11 additions & 10 deletions
@@ -1083,7 +1083,7 @@ def test_local_run_metric_score(self):
 
     @pytest.mark.production()
     def test_online_run_metric_score(self):
-        openml.config.server = self.production_server
+        self.use_production_server()
 
         # important to use binary classification task,
         # due to assertions
@@ -1388,7 +1388,7 @@ def test__create_trace_from_arff(self):
     @pytest.mark.production()
     def test_get_run(self):
         # this run is not available on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         run = openml.runs.get_run(473351)
         assert run.dataset_id == 357
         assert run.evaluations["f_measure"] == 0.841225
@@ -1424,7 +1424,7 @@ def _check_run(self, run):
     @pytest.mark.production()
     def test_get_runs_list(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         runs = openml.runs.list_runs(id=[2], display_errors=True)
         assert len(runs) == 1
         for run in runs.to_dict(orient="index").values():
@@ -1437,7 +1437,7 @@ def test_list_runs_empty(self):
     @pytest.mark.production()
     def test_get_runs_list_by_task(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         task_ids = [20]
         runs = openml.runs.list_runs(task=task_ids)
         assert len(runs) >= 590
@@ -1456,7 +1456,7 @@ def test_get_runs_list_by_task(self):
     @pytest.mark.production()
     def test_get_runs_list_by_uploader(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         # 29 is Dominik Kirchhoff
         uploader_ids = [29]
 
@@ -1478,7 +1478,7 @@ def test_get_runs_list_by_uploader(self):
     @pytest.mark.production()
     def test_get_runs_list_by_flow(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         flow_ids = [1154]
         runs = openml.runs.list_runs(flow=flow_ids)
         assert len(runs) >= 1
@@ -1497,7 +1497,7 @@ def test_get_runs_list_by_flow(self):
     @pytest.mark.production()
     def test_get_runs_pagination(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         uploader_ids = [1]
         size = 10
         max = 100
@@ -1510,7 +1510,7 @@ def test_get_runs_pagination(self):
     @pytest.mark.production()
     def test_get_runs_list_by_filters(self):
         # TODO: comes from live, no such lists on test
-        openml.config.server = self.production_server
+        self.use_production_server()
         ids = [505212, 6100]
         tasks = [2974, 339]
         uploaders_1 = [1, 2]
@@ -1548,7 +1548,8 @@ def test_get_runs_list_by_filters(self):
     def test_get_runs_list_by_tag(self):
         # TODO: comes from live, no such lists on test
         # Unit test works on production server only
-        openml.config.server = self.production_server
+
+        self.use_production_server()
         runs = openml.runs.list_runs(tag="curves")
         assert len(runs) >= 1
 
@@ -1663,7 +1664,7 @@ def test_run_flow_on_task_downloaded_flow(self):
     @pytest.mark.production()
     def test_format_prediction_non_supervised(self):
         # non-supervised tasks don't exist on the test server
-        openml.config.server = self.production_server
+        self.use_production_server()
         clustering = openml.tasks.get_task(126033, download_data=False)
         ignored_input = [0] * 5
         with pytest.raises(
