I am not sure what you mean by 10*10 cross-validation. The ShuffleSplit configuration you give will call the fit method of the estimator 10 times. You can either run that 10 times with an explicit outer loop, or get 100 calls to fit directly, each with 10% of the data held out for testing, in a single loop if you use instead:
>>> ss = ShuffleSplit(X.shape[0], n_iterations=100, test_fraction=0.1,
... random_state=42)
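The snippet above uses the old sklearn.cross_validation API. In recent scikit-learn releases the same split lives in sklearn.model_selection with renamed parameters; here is a minimal sketch of the equivalent 100 × 10%-test configuration, using stand-in data and a stand-in estimator since your clf/X/y aren't shown:

from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit, cross_val_score

X, y = make_blobs(n_samples=200, centers=2, random_state=0)  # stand-in data
clf = LogisticRegression()                                    # stand-in estimator

# 100 random 90% train / 10% test splits, equivalent in spirit to
# n_iterations=100, test_fraction=0.1 above
ss = ShuffleSplit(n_splits=100, test_size=0.1, random_state=42)
scores = cross_val_score(clf, X, y, cv=ss)
print(scores.mean(), scores.std())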
If you want to do 10 runs of StratifiedKFold with k=10, you can shuffle the dataset between the runs (that leads to a total of 100 calls to the fit method, with a 90% train / 10% test split for each call to fit):
>>> from sklearn.utils import shuffle
>>> from sklearn.cross_validation import StratifiedKFold, cross_val_score
>>> for i in range(10):
... X, y = shuffle(X_orig, y_orig, random_state=i)
... skf = StratifiedKFold(y, 10)
... print cross_val_score(clf, X, y, cv=skf)
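With current scikit-learn (0.19 and later) the same 10×10 scheme is available directly as RepeatedStratifiedKFold, which reshuffles and re-splits the data on every repeat. A minimal sketch, reusing the stand-in X, y and clf from the sketch above:

from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score

# 10 repetitions of stratified 10-fold CV -> 100 fit/score calls in total
rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42)
scores = cross_val_score(clf, X, y, cv=rskf)
print(scores.mean(), scores.std())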
I had the same problem. Here is my solution: instead of averaging across the folds, I compute the precision_recall_curve on the concatenated results from all folds, after the loop. According to the discussion in https://stats.stackexchange.com/questions/34611/meanscores-vs-scoreconcatenation-in-cross-validation this is a generally preferable approach.
import matplotlib.pyplot as plt
import numpy
from sklearn.datasets import make_blobs
from sklearn.metrics import precision_recall_curve, auc
from sklearn.model_selection import KFold
from sklearn.svm import SVC

FOLDS = 5

# Two overlapping blobs as a noisy binary classification problem
X, y = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=10.0,
                  random_state=12345)

f, axes = plt.subplots(1, 2, figsize=(10, 5))

# Left panel: the raw data
axes[0].scatter(X[y == 0, 0], X[y == 0, 1], color='blue', s=2, label='y=0')
axes[0].scatter(X[y != 0, 0], X[y != 0, 1], color='red', s=2, label='y=1')
axes[0].set_xlabel('X[:,0]')
axes[0].set_ylabel('X[:,1]')
axes[0].legend(loc='lower left', fontsize='small')

k_fold = KFold(n_splits=FOLDS, shuffle=True, random_state=12345)
predictor = SVC(kernel='linear', C=1.0, probability=True, random_state=12345)

y_real = []
y_proba = []
for i, (train_index, test_index) in enumerate(k_fold.split(X)):
    Xtrain, Xtest = X[train_index], X[test_index]
    ytrain, ytest = y[train_index], y[test_index]
    predictor.fit(Xtrain, ytrain)
    pred_proba = predictor.predict_proba(Xtest)
    # Per-fold curve, plotted for comparison with the pooled curve below
    precision, recall, _ = precision_recall_curve(ytest, pred_proba[:, 1])
    lab = 'Fold %d AUC=%.4f' % (i + 1, auc(recall, precision))
    axes[1].step(recall, precision, label=lab)
    # Collect the out-of-fold labels and probabilities
    y_real.append(ytest)
    y_proba.append(pred_proba[:, 1])

# Pool all folds and compute a single precision-recall curve
y_real = numpy.concatenate(y_real)
y_proba = numpy.concatenate(y_proba)
precision, recall, _ = precision_recall_curve(y_real, y_proba)
lab = 'Overall AUC=%.4f' % (auc(recall, precision))
axes[1].step(recall, precision, label=lab, lw=2, color='black')
axes[1].set_xlabel('Recall')
axes[1].set_ylabel('Precision')
axes[1].legend(loc='lower left', fontsize='small')

f.tight_layout()
f.savefig('result.png')
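If you only need the pooled curve and not the per-fold curves, the loop can be replaced by cross_val_predict, which returns one out-of-fold probability per sample. A minimal sketch building on the X, y, k_fold and predictor defined above:

from sklearn.model_selection import cross_val_predict

# One predicted probability per sample, each produced by a model that
# never saw that sample during training
proba = cross_val_predict(predictor, X, y, cv=k_fold, method='predict_proba')
precision, recall, _ = precision_recall_curve(y, proba[:, 1])
print('Overall AUC=%.4f' % auc(recall, precision))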
Best Answer
The shuffle flag for cross_validation.StratifiedKFold was introduced in version 0.15: http://scikit-learn.org/0.15/modules/generated/sklearn.cross_validation.StratifiedKFold.html
This can be found in the Changelog:
http://scikit-learn.org/stable/whats_new.html#new-features
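A minimal sketch of the flag in use, written against the current sklearn.model_selection API (where StratifiedKFold takes n_splits instead of the old positional y and n_folds arguments), with stand-in data:

from sklearn.datasets import make_blobs
from sklearn.model_selection import StratifiedKFold

X, y = make_blobs(n_samples=100, centers=2, random_state=0)  # stand-in data

# shuffle=True randomizes the sample order before the stratified split;
# random_state makes that shuffling reproducible
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
for train_index, test_index in skf.split(X, y):
    print(len(train_index), len(test_index))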