diff --git a/.flake8 b/.flake8
index c0fe5e06f..08bb8ea10 100644
--- a/.flake8
+++ b/.flake8
@@ -1,7 +1,7 @@
 [flake8]
 max-line-length = 100
 show-source = True
-select = C,E,F,W,B
+select = C,E,F,W,B,T
 ignore = E203, E402, W503
 per-file-ignores =
     *__init__.py:F401
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 75e53f0dd..b3a1d2aba 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,6 +19,10 @@ repos:
     - id: flake8
       name: flake8 openml
       files: openml/*
+      additional_dependencies:
+        - flake8-print==3.1.4
     - id: flake8
       name: flake8 tests
       files: tests/*
+      additional_dependencies:
+        - flake8-print==3.1.4
diff --git a/openml/extensions/sklearn/extension.py b/openml/extensions/sklearn/extension.py
index af0b42144..071f262f3 100644
--- a/openml/extensions/sklearn/extension.py
+++ b/openml/extensions/sklearn/extension.py
@@ -1312,7 +1312,7 @@ def _prevent_optimize_n_jobs(self, model):
                     "Could not find attribute "
                     "param_distributions."
                 )
-            print(
+            logger.warning(
                 "Warning! Using subclass BaseSearchCV other than "
                 "{GridSearchCV, RandomizedSearchCV}. "
                 "Should implement param check. "
diff --git a/tests/conftest.py b/tests/conftest.py
index 59fa33aca..461a513fd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -40,7 +40,6 @@
 # exploiting the fact that conftest.py always resides in the root directory for tests
 static_dir = os.path.dirname(os.path.abspath(__file__))
 logger.info("static directory: {}".format(static_dir))
-print("static directory: {}".format(static_dir))
 while True:
     if "openml" in os.listdir(static_dir):
         break
diff --git a/tests/test_datasets/test_dataset_functions.py b/tests/test_datasets/test_dataset_functions.py
index c196ea36e..a3be7b2b7 100644
--- a/tests/test_datasets/test_dataset_functions.py
+++ b/tests/test_datasets/test_dataset_functions.py
@@ -1160,7 +1160,9 @@ def test_publish_fetch_ignore_attribute(self):
             except Exception as e:
                 # returned code 273: Dataset not processed yet
                 # returned code 362: No qualities found
-                print("Failed to fetch dataset:{} with '{}'.".format(dataset.id, str(e)))
+                TestBase.logger.error(
+                    "Failed to fetch dataset:{} with '{}'.".format(dataset.id, str(e))
+                )
                 time.sleep(10)
                 continue
             if downloaded_dataset is None:
diff --git a/tests/test_study/test_study_examples.py b/tests/test_study/test_study_examples.py
index 2c403aa84..14e2405f2 100644
--- a/tests/test_study/test_study_examples.py
+++ b/tests/test_study/test_study_examples.py
@@ -48,10 +48,12 @@ def test_Figure1a(self):
             clf, task, avoid_duplicate_runs=False
         )  # run classifier on splits (requires API key)
         score = run.get_metric_fn(sklearn.metrics.accuracy_score)  # print accuracy score
-        print("Data set: %s; Accuracy: %0.2f" % (task.get_dataset().name, score.mean()))
+        TestBase.logger.info(
+            "Data set: %s; Accuracy: %0.2f" % (task.get_dataset().name, score.mean())
+        )
         run.publish()  # publish the experiment on OpenML (optional)
         TestBase._mark_entity_for_removal("run", run.run_id)
         TestBase.logger.info(
             "collected from {}: {}".format(__file__.split("/")[-1], run.run_id)
         )
-        print("URL for run: %s/run/%d" % (openml.config.server, run.run_id))
+        TestBase.logger.info("URL for run: %s/run/%d" % (openml.config.server, run.run_id))