Update to work with new versions of scipy and scikit
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 21 KiB |
Before Width: | Height: | Size: 123 KiB After Width: | Height: | Size: 122 KiB |
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 51 KiB |
Before Width: | Height: | Size: 175 KiB After Width: | Height: | Size: 176 KiB |
Before Width: | Height: | Size: 174 KiB After Width: | Height: | Size: 175 KiB |
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 25 KiB |
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 19 KiB |
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 19 KiB |
|
@@ -3,8 +3,7 @@
|
|||
from __future__ import absolute_import
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
from sklearn.externals import six
|
||||
import six
|
||||
|
||||
__all__ = [
|
||||
'BaseSimpleEstimator'
|
||||
|
|
|
@@ -103,4 +103,4 @@ class KNNClassifier(BaseSimpleEstimator):
|
|||
# We want the most common along the rows as the predictions
|
||||
# I.e:
|
||||
# array([1, ..., 0])
|
||||
return mode(predicted_labels, axis=-1)[0].ravel()
|
||||
return mode(predicted_labels, axis=1)[0].ravel()
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from sklearn.externals import six
|
||||
import six
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
import numpy as np
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from sklearn.externals import six
|
||||
import six
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
__all__ = [
|
||||
|
|
|
@@ -70,7 +70,7 @@ class SimpleLinearRegression(BaseSimpleEstimator):
|
|||
# Let's compute the least squares on X wrt y
|
||||
# Least squares solves the equation `a x = b` by computing a
|
||||
# vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`.
|
||||
theta, _, rank, singular_values = lstsq(X, y)
|
||||
theta, _, rank, singular_values = lstsq(X, y, rcond=None)
|
||||
|
||||
# finally, we compute the intercept values as the mean of the target
|
||||
# variable MINUS the inner product of the X_means and the coefficients
|
||||
|
|
|
@@ -5,7 +5,6 @@ from __future__ import absolute_import
|
|||
from sklearn.datasets import load_iris
|
||||
from packtml.utils import linalg
|
||||
|
||||
from numpy.testing import assert_array_almost_equal
|
||||
import numpy as np
|
||||
|
||||
iris = load_iris()
|
||||
|
@@ -17,7 +16,7 @@ def test_row_norms():
|
|||
X_centered = X - means
|
||||
|
||||
norms = linalg.l2_norm(X_centered, axis=0)
|
||||
assert_array_almost_equal(
|
||||
assert np.allclose(
|
||||
norms,
|
||||
np.array([ 10.10783524, 5.29269308,
|
||||
21.53749599, 9.31556404]))
|
||||
np.array([10.10783524, 5.29269308, 21.53749599, 9.31556404]),
|
||||
rtol=0.01)
|
||||
|
|
|
@@ -3,7 +3,6 @@
|
|||
from __future__ import absolute_import
|
||||
|
||||
from sklearn.externals import six
|
||||
from sklearn.utils.validation import check_random_state
|
||||
from sklearn.model_selection import ShuffleSplit
|
||||
|
||||
import numpy as np
|
||||
|
|
|
@@ -1,5 +1,5 @@
|
|||
numpy>=0.11
|
||||
numpy>=0.15
|
||||
scipy>=0.19
|
||||
scikit-learn>=0.18
|
||||
pandas
|
||||
scikit-learn>=0.19
|
||||
pandas>=0.23
|
||||
matplotlib
|