Merged
Changes from all commits
Commits
56 commits
b45f6f2
Adding importable helper functions
Neeratyoy Oct 29, 2020
8e7ea0b
Changing import of cat, cont
Neeratyoy Oct 29, 2020
102a084
Merge branch 'develop' into fix_773
Neeratyoy Oct 29, 2020
18a2dba
Better docstrings
Neeratyoy Oct 30, 2020
381c267
Adding unit test to check ColumnTransformer
Neeratyoy Oct 30, 2020
5dbff2e
Refinements from @mfeurer
Neeratyoy Nov 2, 2020
fc4ec73
Editing example to support both NumPy and Pandas
Neeratyoy Nov 2, 2020
8d5cad9
Merge branch 'develop' into fix_773
Neeratyoy Nov 3, 2020
3d66404
Merge branch 'develop' into fix_773
Neeratyoy Nov 4, 2020
90c8de6
Unit test fix to mark for deletion
Neeratyoy Nov 4, 2020
e0af15e
Making some unit tests work
Neeratyoy Nov 10, 2020
14aa11d
Waiting for dataset to be processed
Neeratyoy Nov 16, 2020
31d48d8
Minor test collection fix
Neeratyoy Nov 16, 2020
431447c
Template to handle missing tasks
Neeratyoy Nov 30, 2020
cc3199e
Accounting for more missing tasks:
Neeratyoy Nov 30, 2020
8a29668
Fixing some more unit tests
Neeratyoy Nov 30, 2020
405e03c
Simplifying check_task_existence
Neeratyoy Nov 30, 2020
caf4f46
black changes
Neeratyoy Dec 4, 2020
b308e71
Minor formatting
Neeratyoy Dec 8, 2020
436a9fe
Handling task exists check
Neeratyoy Dec 9, 2020
ddd8b04
Testing edited check task func
Neeratyoy Dec 14, 2020
74ae622
Merge branch 'fix_unit_tests' of https://github.com/openml/openml-pyt…
Neeratyoy Dec 14, 2020
50ce90e
Flake fix
Neeratyoy Dec 15, 2020
aea2832
Updating with fixed unit tests from PR #1000
Neeratyoy Dec 15, 2020
56cd639
More retries on connection error
Neeratyoy Dec 16, 2020
8e8ea2e
Adding max_retries to config default
Neeratyoy Dec 17, 2020
d518beb
Update database retry unit test
Neeratyoy Dec 17, 2020
37d9f6b
Print to debug hash exception
Neeratyoy Dec 17, 2020
9bd4892
Fixing checksum unit test
Neeratyoy Dec 17, 2020
dc41b5d
Retry on _download_text_file
Neeratyoy Dec 18, 2020
396cb8d
Update datasets_tutorial.py
mfeurer Dec 21, 2020
8f380de
Update custom_flow_tutorial.py
mfeurer Dec 21, 2020
bc1745e
Update test_study_functions.py
mfeurer Dec 21, 2020
d95b5e6
Update test_dataset_functions.py
mfeurer Dec 21, 2020
d58ca5a
Merge branch 'fix_unit_tests' into fix_773
Neeratyoy Dec 21, 2020
91c6cf5
more retries, but also more time between retries
mfeurer Dec 21, 2020
b43a0e0
Merge branch 'fix_unit_tests' of https://github.com/openml/openml-pyt…
Neeratyoy Dec 21, 2020
a9430b3
allow for even more retries on get calls
mfeurer Dec 21, 2020
e9cfba8
Catching failed get task
Neeratyoy Dec 21, 2020
c13f6ce
Merge branch 'fix_unit_tests' of https://github.com/openml/openml-pyt…
Neeratyoy Dec 21, 2020
3d7abc2
undo stupid change
mfeurer Dec 21, 2020
94576b1
Merge branch 'fix_unit_tests' of https://github.com/openml/openml-pyt…
Neeratyoy Dec 21, 2020
b5e1242
fix one more test
mfeurer Dec 21, 2020
d764aad
Merge branch 'fix_unit_tests' into fix_773
Neeratyoy Dec 21, 2020
f5e4a3e
Refactoring md5 hash check inside _send_request
Neeratyoy Dec 21, 2020
c065dfc
Merge branch 'fix_unit_tests' into fix_773
Neeratyoy Dec 21, 2020
07ce722
Fixing a fairly common unit test fail
Neeratyoy Dec 22, 2020
82e1b72
Reverting loose check on unit test
Neeratyoy Dec 23, 2020
936c252
Merge branch 'fix_unit_tests' into fix_773
Neeratyoy Dec 23, 2020
fc8b464
Merge branch 'develop' into fix_773
PGijsbers Dec 24, 2020
46ab043
Fixing integer type check to allow np.integer
Neeratyoy Jan 22, 2021
1be82c3
Trying to loosen check on unit test as fix
Neeratyoy Jan 25, 2021
dfbf5e5
Examples support for pandas=1.2.1
Neeratyoy Jan 27, 2021
b611f9f
pandas indexing as iloc
Neeratyoy Jan 27, 2021
93833c3
fix example: actually load the different tasks
mfeurer Jan 28, 2021
f6aa7ed
Renaming custom flow to disable tutorial (#1019)
Neeratyoy Jan 28, 2021
68 changes: 59 additions & 9 deletions examples/30_extended/flows_and_runs_tutorial.py
@@ -8,6 +8,7 @@
# License: BSD 3-Clause

import openml
import numpy as np
from sklearn import compose, ensemble, impute, neighbors, preprocessing, pipeline, tree

############################################################################
@@ -83,33 +84,32 @@
#
# When you need to handle 'dirty' data, build pipelines to model them automatically.
task = openml.tasks.get_task(1)
features = task.get_dataset().features
nominal_feature_indices = [
i
for i in range(len(features))
if features[i].name != task.target_name and features[i].data_type == "nominal"
]

# OpenML helper functions for sklearn can be plugged in directly for complicated pipelines
from openml.extensions.sklearn import cat, cont

pipe = pipeline.Pipeline(
steps=[
(
"Preprocessing",
compose.ColumnTransformer(
[
(
"Nominal",
"categorical",
pipeline.Pipeline(
[
("Imputer", impute.SimpleImputer(strategy="most_frequent")),
(
"Encoder",
preprocessing.OneHotEncoder(
sparse=False, handle_unknown="ignore",
sparse=False, handle_unknown="ignore"
),
),
]
),
nominal_feature_indices,
cat, # returns the categorical feature indices
),
("continuous", "passthrough", cont), # returns the numeric feature indices
]
),
),
@@ -121,6 +121,56 @@
myrun = run.publish()
print("Uploaded to http://test.openml.org/r/" + str(myrun.run_id))


# The above pipeline works with the helper functions, which internally operate on a pandas DataFrame.
# If pandas is not available, or NumPy-based data processing is required, the same pipeline can be
# written to work with NumPy arrays, as shown below.

# Extracting the indices of the categorical and numeric columns
features = task.get_dataset().features
categorical_feature_indices = []
numeric_feature_indices = []
for i in range(len(features)):
if features[i].name == task.target_name:
continue
if features[i].data_type == "nominal":
categorical_feature_indices.append(i)
else:
numeric_feature_indices.append(i)

pipe = pipeline.Pipeline(
steps=[
(
"Preprocessing",
compose.ColumnTransformer(
[
(
"categorical",
pipeline.Pipeline(
[
("Imputer", impute.SimpleImputer(strategy="most_frequent")),
(
"Encoder",
preprocessing.OneHotEncoder(
sparse=False, handle_unknown="ignore"
),
),
]
),
categorical_feature_indices,
),
("continuous", "passthrough", numeric_feature_indices),
]
),
),
("Classifier", ensemble.RandomForestClassifier(n_estimators=10)),
]
)

run = openml.runs.run_model_on_task(pipe, task, avoid_duplicate_runs=False, dataset_format="array")
myrun = run.publish()
print("Uploaded to http://test.openml.org/r/" + str(myrun.run_id))

###############################################################################
# Running flows on tasks offline for later upload
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
11 changes: 2 additions & 9 deletions examples/30_extended/run_setup_tutorial.py
@@ -34,6 +34,8 @@

import numpy as np
import openml
from openml.extensions.sklearn import cat, cont

from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
@@ -57,15 +59,6 @@
# easy as you want it to be


# Helper functions to return required columns for ColumnTransformer
def cont(X):
return X.dtypes != "category"


def cat(X):
return X.dtypes == "category"


cat_imp = make_pipeline(
SimpleImputer(strategy="most_frequent"),
OneHotEncoder(handle_unknown="ignore", sparse=False),
37 changes: 20 additions & 17 deletions examples/30_extended/task_manual_iteration_tutorial.py
@@ -61,11 +61,11 @@
####################################################################################################
# And then split the data based on this:

X, y, _, _ = task.get_dataset().get_data(task.target_name)
X_train = X.loc[train_indices]
y_train = y[train_indices]
X_test = X.loc[test_indices]
y_test = y[test_indices]
X, y = task.get_X_and_y(dataset_format="dataframe")
X_train = X.iloc[train_indices]
y_train = y.iloc[train_indices]
X_test = X.iloc[test_indices]
y_test = y.iloc[test_indices]

print(
"X_train.shape: {}, y_train.shape: {}, X_test.shape: {}, y_test.shape: {}".format(
@@ -78,6 +78,7 @@

task_id = 3
task = openml.tasks.get_task(task_id)
X, y = task.get_X_and_y(dataset_format="dataframe")
n_repeats, n_folds, n_samples = task.get_split_dimensions()
print(
"Task {}: number of repeats: {}, number of folds: {}, number of samples {}.".format(
@@ -93,10 +94,10 @@
train_indices, test_indices = task.get_train_test_split_indices(
repeat=repeat_idx, fold=fold_idx, sample=sample_idx,
)
X_train = X.loc[train_indices]
y_train = y[train_indices]
X_test = X.loc[test_indices]
y_test = y[test_indices]
X_train = X.iloc[train_indices]
y_train = y.iloc[train_indices]
X_test = X.iloc[test_indices]
y_test = y.iloc[test_indices]

print(
"Repeat #{}, fold #{}, samples {}: X_train.shape: {}, "
@@ -116,6 +117,7 @@

task_id = 1767
task = openml.tasks.get_task(task_id)
X, y = task.get_X_and_y(dataset_format="dataframe")
n_repeats, n_folds, n_samples = task.get_split_dimensions()
print(
"Task {}: number of repeats: {}, number of folds: {}, number of samples {}.".format(
@@ -131,10 +133,10 @@
train_indices, test_indices = task.get_train_test_split_indices(
repeat=repeat_idx, fold=fold_idx, sample=sample_idx,
)
X_train = X.loc[train_indices]
y_train = y[train_indices]
X_test = X.loc[test_indices]
y_test = y[test_indices]
X_train = X.iloc[train_indices]
y_train = y.iloc[train_indices]
X_test = X.iloc[test_indices]
y_test = y.iloc[test_indices]

print(
"Repeat #{}, fold #{}, samples {}: X_train.shape: {}, "
@@ -154,6 +156,7 @@

task_id = 1702
task = openml.tasks.get_task(task_id)
X, y = task.get_X_and_y(dataset_format="dataframe")
n_repeats, n_folds, n_samples = task.get_split_dimensions()
print(
"Task {}: number of repeats: {}, number of folds: {}, number of samples {}.".format(
@@ -169,10 +172,10 @@
train_indices, test_indices = task.get_train_test_split_indices(
repeat=repeat_idx, fold=fold_idx, sample=sample_idx,
)
X_train = X.loc[train_indices]
y_train = y[train_indices]
X_test = X.loc[test_indices]
y_test = y[test_indices]
X_train = X.iloc[train_indices]
y_train = y.iloc[train_indices]
X_test = X.iloc[test_indices]
y_test = y.iloc[test_indices]

print(
"Repeat #{}, fold #{}, samples {}: X_train.shape: {}, "
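The switch above from .loc to .iloc is deliberate: get_train_test_split_indices returns positional row numbers, and .iloc selects rows by position, whereas .loc selects by label. A minimal sketch of the difference (toy data, not part of this PR):

# Illustrative only: positional vs. label-based indexing with split indices.
import numpy as np
import pandas as pd

X = pd.DataFrame({"a": [10, 20, 30, 40]}, index=[100, 101, 102, 103])
train_indices = np.array([0, 2])  # positional indices, like those returned by an OpenML split

print(X.iloc[train_indices])  # selects the 1st and 3rd rows, as intended
# X.loc[train_indices] would look for the labels 0 and 2 and raise a KeyError here.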
28 changes: 28 additions & 0 deletions openml/extensions/sklearn/__init__.py
@@ -7,3 +7,31 @@
__all__ = ["SklearnExtension"]

register_extension(SklearnExtension)


def cont(X):
"""Returns True for all non-categorical columns, False for the rest.

This is a helper function for OpenML datasets encoded as DataFrames, simplifying the handling
of mixed data types. To build sklearn models on mixed data types, a ColumnTransformer is
required to process each type of column separately.
This function allows transformations meant for continuous/numeric columns to select those
columns when the dataset is given as a DataFrame.
"""
if not hasattr(X, "dtypes"):
raise AttributeError("Not a Pandas DataFrame with 'dtypes' as attribute!")
return X.dtypes != "category"


def cat(X):
"""Returns True for all categorical columns, False for the rest.

This is a helper function for OpenML datasets encoded as DataFrames, simplifying the handling
of mixed data types. To build sklearn models on mixed data types, a ColumnTransformer is
required to process each type of column separately.
This function allows transformations meant for categorical columns to select those
columns when the dataset is given as a DataFrame.
"""
if not hasattr(X, "dtypes"):
raise AttributeError("Not a Pandas DataFrame with 'dtypes' as attribute!")
return X.dtypes == "category"
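Because cat and cont are plain callables returning boolean column masks, they can be handed directly to a ColumnTransformer as column selectors, exactly as the updated examples do. A minimal standalone usage sketch (dataset id 31 / credit-g is only an illustrative choice; any OpenML dataset loaded as a DataFrame would work):

# Illustrative usage of the new helpers, not part of the PR.
import openml
from openml.extensions.sklearn import cat, cont
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

dataset = openml.datasets.get_dataset(31)  # credit-g: mixed categorical/numeric features
X, y, _, _ = dataset.get_data(target=dataset.default_target_attribute, dataset_format="dataframe")

preprocessor = ColumnTransformer(
    transformers=[
        ("categorical", OneHotEncoder(handle_unknown="ignore"), cat),  # cat(X) -> boolean mask
        ("numeric", StandardScaler(), cont),  # cont(X) -> boolean mask
    ]
)
Xt = preprocessor.fit_transform(X)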
7 changes: 5 additions & 2 deletions openml/runs/functions.py
@@ -10,6 +10,7 @@

import sklearn.metrics
import xmltodict
import numpy as np
import pandas as pd

import openml
@@ -508,7 +509,9 @@ def _calculate_local_measure(sklearn_fn, openml_name):
for i, tst_idx in enumerate(test_indices):
if task.class_labels is not None:
prediction = (
task.class_labels[pred_y[i]] if isinstance(pred_y[i], int) else pred_y[i]
task.class_labels[pred_y[i]]
if isinstance(pred_y[i], (int, np.integer))
else pred_y[i]
)
if isinstance(test_y, pd.Series):
test_prediction = (
@@ -519,7 +522,7 @@ def _calculate_local_measure(sklearn_fn, openml_name):
else:
test_prediction = (
task.class_labels[test_y[i]]
if isinstance(test_y[i], int)
if isinstance(test_y[i], (int, np.integer))
else test_y[i]
)
pred_prob = proba_y.iloc[i] if isinstance(proba_y, pd.DataFrame) else proba_y[i]
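The widened isinstance checks above matter because values drawn from a NumPy array are NumPy scalar types, which are not instances of Python's built-in int under Python 3. A quick illustration (not part of the PR):

# Why (int, np.integer) is needed: NumPy integer scalars are not Python ints.
import numpy as np

pred = np.array([0, 1, 2])[0]  # a np.int64 scalar
print(isinstance(pred, int))  # False
print(isinstance(pred, (int, np.integer)))  # True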
10 changes: 1 addition & 9 deletions openml/testing.py
@@ -318,12 +318,4 @@ class CustomImputer(SimpleImputer):
pass


def cont(X):
return X.dtypes != "category"


def cat(X):
return X.dtypes == "category"


__all__ = ["TestBase", "SimpleImputer", "CustomImputer", "cat", "cont", "check_task_existence"]
__all__ = ["TestBase", "SimpleImputer", "CustomImputer", "check_task_existence"]
@@ -40,7 +40,8 @@
from openml.flows import OpenMLFlow
from openml.flows.functions import assert_flows_equal
from openml.runs.trace import OpenMLRunTrace
from openml.testing import TestBase, SimpleImputer, CustomImputer, cat, cont
from openml.testing import TestBase, SimpleImputer, CustomImputer
from openml.extensions.sklearn import cat, cont


this_directory = os.path.dirname(os.path.abspath(__file__))
@@ -2187,16 +2188,6 @@ def test_failed_serialization_of_custom_class(self):
# for lower versions
from sklearn.preprocessing import Imputer as SimpleImputer

class CustomImputer(SimpleImputer):
pass

def cont(X):
return X.dtypes != "category"

def cat(X):
return X.dtypes == "category"

import sklearn.metrics
import sklearn.tree
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.compose import ColumnTransformer
@@ -2219,3 +2210,38 @@ def cat(X):
raise AttributeError(e)
else:
raise Exception(e)

@unittest.skipIf(
LooseVersion(sklearn.__version__) < "0.20",
reason="columntransformer introduction in 0.20.0",
)
def test_setupid_with_column_transformer(self):
"""Test to check if inclusion of ColumnTransformer in a pipleline is treated as a new
flow each time.
"""
import sklearn.compose
from sklearn.svm import SVC

def column_transformer_pipe(task_id):
task = openml.tasks.get_task(task_id)
# make columntransformer
preprocessor = sklearn.compose.ColumnTransformer(
transformers=[
("num", StandardScaler(), cont),
("cat", OneHotEncoder(handle_unknown="ignore"), cat),
]
)
# make pipeline
clf = SVC(gamma="scale", random_state=1)
pipe = make_pipeline(preprocessor, clf)
# run task
run = openml.runs.run_model_on_task(pipe, task, avoid_duplicate_runs=False)
run.publish()
new_run = openml.runs.get_run(run.run_id)
return new_run

run1 = column_transformer_pipe(11) # only categorical
TestBase._mark_entity_for_removal("run", run1.run_id)
run2 = column_transformer_pipe(23) # only numeric
TestBase._mark_entity_for_removal("run", run2.run_id)
self.assertEqual(run1.setup_id, run2.setup_id)
3 changes: 2 additions & 1 deletion tests/test_runs/test_run_functions.py
@@ -20,7 +20,8 @@
import pandas as pd

import openml.extensions.sklearn
from openml.testing import TestBase, SimpleImputer, CustomImputer, cat, cont
from openml.testing import TestBase, SimpleImputer, CustomImputer
from openml.extensions.sklearn import cat, cont
from openml.runs.functions import _run_task_get_arffcontent, run_exists, format_prediction
from openml.runs.trace import OpenMLRunTrace
from openml.tasks import TaskType
3 changes: 2 additions & 1 deletion tests/test_study/test_study_examples.py
@@ -1,6 +1,7 @@
# License: BSD 3-Clause

from openml.testing import TestBase, SimpleImputer, CustomImputer, cat, cont
from openml.testing import TestBase, SimpleImputer, CustomImputer
from openml.extensions.sklearn import cat, cont

import sklearn
import unittest