This repository was archived by the owner on Dec 6, 2023. It is now read-only.

fix compatibility with scikit-learn by dropping the dependency on sklearn.utils.testing #158

Merged: 3 commits, May 5, 2021
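The pattern applied across all five test files: scikit-learn deprecated sklearn.utils.testing (in 0.22) and later removed it, so the helper assertions are replaced with plain assert statements and their numpy.testing equivalents. A minimal sketch of the mapping, with illustrative placeholder values (value and arr are not names from the PR):

import numpy as np

value = 1.0
arr = np.array([1.0, 2.0])

# assert_equal(a, b)         ->  assert a == b
assert value == 1.0
# assert_greater(a, b)       ->  assert a > b
assert value > 0.5
# assert_almost_equal(a, b)  ->  np.testing.assert_almost_equal(a, b)
np.testing.assert_almost_equal(value, 1.0)
# assert_array_equal(a, b)   ->  np.testing.assert_array_equal(a, b)
np.testing.assert_array_equal(arr, [1.0, 2.0])

Bare asserts are safe here because the suite runs under pytest, which rewrites assert statements to report both operands on failure.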
14 changes: 6 additions & 8 deletions lightning/impl/datasets/tests/test_samples_generator.py
@@ -1,17 +1,15 @@
 import numpy as np
 
-from sklearn.utils.testing import assert_equal
-
 from lightning.impl.datasets.samples_generator import make_nn_regression
 
 
 def test_make_nn_regression():
     X, y, w = make_nn_regression(n_samples=10, n_features=50, n_informative=5)
-    assert_equal(X.shape[0], 10)
-    assert_equal(X.shape[1], 50)
-    assert_equal(y.shape[0], 10)
-    assert_equal(w.shape[0], 50)
-    assert_equal(np.sum(X.data != 0), 10 * 5)
+    assert X.shape[0] == 10
+    assert X.shape[1] == 50
+    assert y.shape[0] == 10
+    assert w.shape[0] == 50
+    assert np.sum(X.data != 0) == 10 * 5
 
     X, y, w = make_nn_regression(n_samples=10, n_features=50, n_informative=50)
-    assert_equal(np.sum(X.data != 0), 10 * 50)
+    assert np.sum(X.data != 0) == 10 * 50
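The 10 * 5 expectation counts stored nonzeros: make_nn_regression produces a sparse X with n_informative nonzero features per sample. A hedged sketch of the same counting idiom, building an equivalent matrix with scipy.sparse directly rather than calling the generator:

import numpy as np
import scipy.sparse as sp

# Build a 10x50 CSR matrix with exactly 5 nonzeros per row,
# mirroring the n_samples * n_informative count checked above.
rows = np.repeat(np.arange(10), 5)
cols = np.tile(np.arange(5), 10)
X = sp.csr_matrix((np.ones(50), (rows, cols)), shape=(10, 50))
assert np.sum(X.data != 0) == 10 * 5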
9 changes: 4 additions & 5 deletions lightning/impl/randomkit/tests/test_random.py
@@ -1,22 +1,21 @@
 import pickle
 import numpy as np
-from numpy.testing import (assert_almost_equal, assert_array_equal,
-                           assert_equal)
 
 from lightning.impl.randomkit import RandomState
 from six.moves import xrange
 
 
 def test_randint():
     rs = RandomState(seed=0)
     vals = [rs.randint(10) for t in xrange(10000)]
-    assert_almost_equal(np.mean(vals), 5.018)
+    np.testing.assert_almost_equal(np.mean(vals), 5.018)
 
 
 def test_shuffle():
     ind = np.arange(10)
     rs = RandomState(seed=0)
     rs.shuffle(ind)
-    assert_array_equal(ind, [2, 8, 4, 9, 1, 6, 7, 3, 0, 5])
+    np.testing.assert_array_equal(ind, [2, 8, 4, 9, 1, 6, 7, 3, 0, 5])
@@ -25,4 +24,4 @@ def test_random_state_pickle():
     pickle_rs = pickle.dumps(rs)
     pickle_rs = pickle.loads(pickle_rs)
     pickle_random_integer = pickle_rs.randint(5)
-    assert_equal(random_integer, pickle_random_integer)
+    assert random_integer == pickle_random_integer
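One behavioral note on the numpy helper used here (a numpy fact, not part of the diff): assert_almost_equal defaults to decimal=7, requiring abs(actual - desired) < 1.5 * 10**-decimal, and the optional third argument loosens the tolerance. For illustration:

import numpy as np

# Default decimal=7 means abs(actual - desired) < 1.5e-7.
np.testing.assert_almost_equal(5.01800004, 5.018)           # passes
np.testing.assert_almost_equal(5.0181, 5.018, decimal=3)    # passes: 1e-4 < 1.5e-3
# np.testing.assert_almost_equal(5.0181, 5.018)             # would raise AssertionError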
18 changes: 8 additions & 10 deletions lightning/impl/tests/test_adagrad.py
@@ -1,8 +1,6 @@
 import numpy as np
 
 from sklearn.datasets import load_iris
-from sklearn.utils.testing import assert_equal
-from sklearn.utils.testing import assert_almost_equal
 
 from lightning.classification import AdaGradClassifier
 from lightning.regression import AdaGradRegressor
@@ -20,44 +18,44 @@ def test_adagrad_elastic_hinge():
     clf = AdaGradClassifier(alpha=0.5, l1_ratio=0.85, n_iter=10, random_state=0)
     clf.fit(X_bin, y_bin)
     assert not hasattr(clf, "predict_proba")
-    assert_equal(clf.score(X_bin, y_bin), 1.0)
+    assert clf.score(X_bin, y_bin) == 1.0
 
 
 def test_adagrad_elastic_smooth_hinge():
     clf = AdaGradClassifier(alpha=0.5, l1_ratio=0.85, loss="smooth_hinge",
                             n_iter=10, random_state=0)
     clf.fit(X_bin, y_bin)
     assert not hasattr(clf, "predict_proba")
-    assert_equal(clf.score(X_bin, y_bin), 1.0)
+    assert clf.score(X_bin, y_bin) == 1.0
 
 
 def test_adagrad_elastic_log():
     clf = AdaGradClassifier(alpha=0.1, l1_ratio=0.85, loss="log", n_iter=10,
                             random_state=0)
     clf.fit(X_bin, y_bin)
-    assert_equal(clf.score(X_bin, y_bin), 1.0)
+    assert clf.score(X_bin, y_bin) == 1.0
     check_predict_proba(clf, X_bin)
 
 
 def test_adagrad_hinge_multiclass():
     clf = AdaGradClassifier(alpha=1e-2, n_iter=100, loss="hinge", random_state=0)
     clf.fit(X, y)
     assert not hasattr(clf, "predict_proba")
-    assert_almost_equal(clf.score(X, y), 0.940, 3)
+    np.testing.assert_almost_equal(clf.score(X, y), 0.940, 3)
 
 
 def test_adagrad_classes_binary():
     clf = AdaGradClassifier()
     assert not hasattr(clf, 'classes_')
     clf.fit(X_bin, y_bin)
-    assert_equal(list(clf.classes_), [-1, 1])
+    assert list(clf.classes_) == [-1, 1]
 
 
 def test_adagrad_classes_multiclass():
     clf = AdaGradClassifier()
     assert not hasattr(clf, 'classes_')
     clf.fit(X, y)
-    assert_equal(list(clf.classes_), [0, 1, 2])
+    assert list(clf.classes_) == [0, 1, 2]
 
 
 def test_adagrad_callback():
@@ -80,12 +78,12 @@ def __call__(self, clf, t):
     clf = AdaGradClassifier(alpha=0.5, l1_ratio=0.85, n_iter=10,
                             callback=cb, random_state=0)
     clf.fit(X_bin, y_bin)
-    assert_equal(cb.acc[-1], 1.0)
+    assert cb.acc[-1] == 1.0
 
 
 def test_adagrad_regression():
     for loss in ("squared", "absolute"):
         reg = AdaGradRegressor(loss=loss)
         reg.fit(X_bin, y_bin)
         y_pred = np.sign(reg.predict(X_bin))
-        assert_equal(np.mean(y_bin == y_pred), 1.0)
+        assert np.mean(y_bin == y_pred) == 1.0
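The callback test is only partially visible in this diff; from the hunk header def __call__(self, clf, t) and the cb.acc[-1] check, the callback appears to be a small class that records training accuracy on each iteration. A speculative reconstruction of its shape (names and body inferred, not copied from the repo):

class Callback(object):
    # Inferred: store training accuracy after every AdaGrad iteration
    # so the test can inspect acc[-1] after fitting.
    def __init__(self, X, y):
        self.X = X
        self.y = y
        self.acc = []

    def __call__(self, clf, t):
        self.acc.append(clf.score(self.X, self.y))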
23 changes: 10 additions & 13 deletions lightning/impl/tests/test_dataset.py
@@ -2,9 +2,6 @@
 import numpy as np
 import scipy.sparse as sp
 
-from sklearn.utils.testing import assert_array_equal
-from sklearn.utils.testing import assert_array_almost_equal
-from sklearn.utils.testing import assert_equal
 from six.moves import xrange
 
 from sklearn.datasets import make_classification
@@ -38,34 +35,34 @@ def test_contiguous_get_row():
     ind = np.arange(X.shape[1])
     for i in xrange(X.shape[0]):
         indices, data, n_nz = cds.get_row(i)
-        assert_array_equal(indices, ind)
-        assert_array_equal(data, X[i])
-        assert_equal(n_nz, X.shape[1])
+        np.testing.assert_array_equal(indices, ind)
+        np.testing.assert_array_equal(data, X[i])
+        assert n_nz == X.shape[1]
 
 
 def test_csr_get_row():
     for i in xrange(X.shape[0]):
         indices, data, n_nz = csr_ds.get_row(i)
         for jj in xrange(n_nz):
             j = indices[jj]
-            assert_equal(X[i, j], data[jj])
+            assert X[i, j] == data[jj]
 
 
 def test_fortran_get_column():
     ind = np.arange(X.shape[0])
     for j in xrange(X.shape[1]):
         indices, data, n_nz = fds.get_column(j)
-        assert_array_equal(indices, ind)
-        assert_array_equal(data, X[:, j])
-        assert_equal(n_nz, X.shape[0])
+        np.testing.assert_array_equal(indices, ind)
+        np.testing.assert_array_equal(data, X[:, j])
+        assert n_nz == X.shape[0]
 
 
 def test_csc_get_column():
     for j in xrange(X.shape[1]):
         indices, data, n_nz = csc_ds.get_column(j)
         for ii in xrange(n_nz):
             i = indices[ii]
-            assert_equal(X[i, j], data[ii])
+            assert X[i, j] == data[ii]
 
 
 def test_picklable_datasets():
@@ -74,5 +71,5 @@ def test_picklable_datasets():
     for dataset in [cds, csr_ds, fds, csc_ds]:
         pds = pickle.dumps(dataset)
         dataset = pickle.loads(pds)
-        assert_equal(dataset.get_n_samples(), X.shape[0])
-        assert_equal(dataset.get_n_features(), X.shape[1])
+        assert dataset.get_n_samples() == X.shape[0]
+        assert dataset.get_n_features() == X.shape[1]
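For readers unfamiliar with lightning's dataset wrappers: get_row(i) (and get_column(j)) return an (indices, data, n_nz) triple describing the nonzero pattern of one row or column. A rough pure-numpy emulation of the dense-row case exercised above (illustrative only, not lightning's implementation):

import numpy as np

def get_row_dense(X, i):
    # A dense row reports every column as "nonzero": all column
    # indices, the raw row data, and n_nz equal to n_features.
    indices = np.arange(X.shape[1])
    return indices, X[i], X.shape[1]

X = np.random.RandomState(0).rand(4, 3)
indices, data, n_nz = get_row_dense(X, 2)
np.testing.assert_array_equal(data, X[2])
assert n_nz == X.shape[1]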
28 changes: 12 additions & 16 deletions lightning/impl/tests/test_dual_cd.py
@@ -5,10 +5,6 @@
 from sklearn.datasets import make_regression
 from six.moves import xrange
 
-from sklearn.utils.testing import assert_equal
-from sklearn.utils.testing import assert_greater
-from sklearn.utils.testing import assert_array_almost_equal
-
 from lightning.impl.datasets.samples_generator import make_classification
 from lightning.impl.dual_cd import LinearSVC
 from lightning.impl.dual_cd import LinearSVR
@@ -40,16 +36,16 @@ def test_sparse_dot():
             K2[i, j] = sparse_dot(ds, i, j)
             K2[j, i] = K[i, j]
 
-    assert_array_almost_equal(K, K2)
+    np.testing.assert_array_almost_equal(K, K2)
 
 
 def test_fit_linear_binary():
     for data in (bin_dense, bin_csr):
         for loss in ("l1", "l2"):
             clf = LinearSVC(loss=loss, random_state=0, max_iter=10)
             clf.fit(data, bin_target)
-            assert_equal(list(clf.classes_), [0, 1])
-            assert_equal(clf.score(data, bin_target), 1.0)
+            assert list(clf.classes_) == [0, 1]
+            assert clf.score(data, bin_target) == 1.0
             y_pred = clf.decision_function(data).ravel()
 
 
@@ -59,17 +55,17 @@ def test_fit_linear_binary_auc():
             clf = LinearSVC(loss=loss, criterion="auc", random_state=0,
                             max_iter=25)
             clf.fit(data, bin_target)
-            assert_equal(clf.score(data, bin_target), 1.0)
+            assert clf.score(data, bin_target) == 1.0
 
 
 def test_fit_linear_multi():
     for data in (mult_dense, mult_sparse):
         clf = LinearSVC(random_state=0)
         clf.fit(data, mult_target)
-        assert_equal(list(clf.classes_), [0, 1, 2])
+        assert list(clf.classes_) == [0, 1, 2]
         y_pred = clf.predict(data)
         acc = np.mean(y_pred == mult_target)
-        assert_greater(acc, 0.85)
+        assert acc > 0.85
 
 
 def test_warm_start():
@@ -79,32 +75,32 @@ def test_warm_start():
 
     clf.fit(bin_dense, bin_target)
     acc = clf.score(bin_dense, bin_target)
-    assert_greater(acc, 0.99)
+    assert acc > 0.99
 
 
 def test_linear_svr():
     reg = LinearSVR(random_state=0)
     reg.fit(reg_dense, reg_target)
-    assert_greater(reg.score(reg_dense, reg_target), 0.99)
+    assert reg.score(reg_dense, reg_target) > 0.99
 
 
 def test_linear_svr_fit_intercept():
     reg = LinearSVR(random_state=0, fit_intercept=True)
     reg.fit(reg_dense, reg_target)
-    assert_greater(reg.score(reg_dense, reg_target), 0.99)
+    assert reg.score(reg_dense, reg_target) > 0.99
 
 
 def test_linear_svr_l2():
     reg = LinearSVR(loss="l2", random_state=0)
     reg.fit(reg_dense, reg_target)
-    assert_greater(reg.score(reg_dense, reg_target), 0.99)
+    assert reg.score(reg_dense, reg_target) > 0.99
 
 
 def test_linear_svr_warm_start():
     reg = LinearSVR(C=1e-3, random_state=0, warm_start=True)
     reg.fit(reg_dense, reg_target)
-    assert_greater(reg.score(reg_dense, reg_target), 0.96)
+    assert reg.score(reg_dense, reg_target) > 0.96
 
     reg.C = 1
     reg.fit(reg_dense, reg_target)
-    assert_greater(reg.score(reg_dense, reg_target), 0.99)
+    assert reg.score(reg_dense, reg_target) > 0.99
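The warm-start tests above follow the standard sklearn-style protocol: fit once with a strong penalty (small C), mutate the hyperparameter on the same estimator, and fit again so the solver resumes from the previous solution. A generic sketch of that protocol using scikit-learn's LogisticRegression rather than lightning, to stay self-contained:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, random_state=0)
clf = LogisticRegression(C=1e-3, warm_start=True, max_iter=200)
clf.fit(X, y)    # heavily regularized first fit
clf.C = 1.0
clf.fit(X, y)    # resumes from the previous coefficients
assert clf.score(X, y) > 0.8   # loose bound; the exact score depends on the data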